Merge branch 'linus' into release
Conflicts:
	arch/x86/kernel/acpi/sleep.c

Signed-off-by: Len Brown <len.brown@intel.com>
@@ -64,8 +64,12 @@ config X86
select HAVE_TEXT_POKE_SMP
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select GENERIC_FIND_FIRST_BIT
select GENERIC_FIND_NEXT_BIT
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP

config INSTRUCTION_DECODER
@@ -119,7 +123,7 @@ config NEED_SG_DMA_LENGTH
def_bool y

config GENERIC_ISA_DMA
def_bool y
def_bool ISA_DMA_API

config GENERIC_IOMAP
def_bool y
@@ -139,7 +143,7 @@ config GENERIC_GPIO
bool

config ARCH_MAY_HAVE_PC_FDC
def_bool y
def_bool ISA_DMA_API

config RWSEM_GENERIC_SPINLOCK
def_bool !X86_XADD
@@ -217,10 +221,6 @@ config X86_HT
def_bool y
depends on SMP

config X86_TRAMPOLINE
def_bool y
depends on SMP || (64BIT && ACPI_SLEEP)

config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
@@ -382,6 +382,8 @@ config X86_INTEL_CE
depends on X86_32
depends on X86_EXTENDED_PLATFORM
select X86_REBOOTFIXUPS
select OF
select OF_EARLY_FLATTREE
---help---
Select for the Intel CE media processor (CE4100) SOC.
This option compiles in support for the CE4100 SOC for settop
@@ -811,7 +813,7 @@ config X86_LOCAL_APIC

config X86_IO_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC

config X86_VISWS_APIC
def_bool y
@@ -1705,7 +1707,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
depends on NUMA

config USE_PERCPU_NUMA_NODE_ID
def_bool X86_64
def_bool y
depends on NUMA

menu "Power management and ACPI options"
@@ -2000,9 +2002,13 @@ source "drivers/pci/pcie/Kconfig"

source "drivers/pci/Kconfig"

# x86_64 have no ISA slots, but do have ISA-style DMA.
# x86_64 have no ISA slots, but can have ISA-style DMA.
config ISA_DMA_API
def_bool y
bool "ISA-style DMA support" if (X86_64 && EXPERT)
default y
help
Enables ISA-style DMA support for devices requiring such controllers.
If unsure, say Y.

if X86_32

@@ -2066,9 +2072,10 @@ config SCx200HR_TIMER

config OLPC
bool "One Laptop Per Child support"
depends on !X86_PAE
select GPIOLIB
select OLPC_OPENFIRMWARE
depends on !X86_64 && !X86_PAE
select OF
select OF_PROMTREE if PROC_DEVICETREE
---help---
Add support for detecting the unique features of the OLPC
XO hardware.
@@ -2079,21 +2086,6 @@ config OLPC_XO1
---help---
Add support for non-essential features of the OLPC XO-1 laptop.

config OLPC_OPENFIRMWARE
bool "Support for OLPC's Open Firmware"
depends on !X86_64 && !X86_PAE
default n
select OF
help
This option adds support for the implementation of Open Firmware
that is used on the OLPC XO-1 Children's Machine.
If unsure, say N here.

config OLPC_OPENFIRMWARE_DT
bool
default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
select OF_PROMTREE

endif # X86_32

config AMD_NB
@@ -2138,6 +2130,11 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC

config KEYS_COMPAT
bool
depends on COMPAT && KEYS
default y

endmenu

@@ -294,11 +294,6 @@ config X86_GENERIC

endif

config X86_CPU
def_bool y
select GENERIC_FIND_FIRST_BIT
select GENERIC_FIND_NEXT_BIT

#
# Define implied options from the CPU selection here
config X86_INTERNODE_CACHE_SHIFT
@@ -331,7 +326,7 @@ config X86_PPRO_FENCE
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
occurances of this problem, at the cost of much heavier spinlock and
occurrences of this problem, at the cost of much heavier spinlock and
memory barrier operations.

If unsure, say n here. Even distro kernels should think twice before
@@ -371,7 +366,7 @@ config X86_INTEL_USERCOPY

config X86_USE_PPRO_CHECKSUM
def_bool y
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM

config X86_USE_3DNOW
def_bool y

@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_decrypt

# Handle the last <16 byte block seperately
# Handle the last <16 byte block separately

paddd ONE(%rip), %xmm0 # increment CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
sub $16, %r11
add %r13, %r11
movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
lea SHIFT_MASK+16(%rip), %r12
sub %r13, %r12
# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_encrypt

# Handle the last <16 Byte block seperately
# Handle the last <16 Byte block separately
paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0

@@ -873,22 +873,18 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
if (ret) {
crypto_free_ablkcipher(ctr_tfm);
return ret;
}
if (ret)
goto out_free_ablkcipher;

ret = -ENOMEM;
req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
if (!req) {
crypto_free_ablkcipher(ctr_tfm);
return -EINVAL;
}
if (!req)
goto out_free_ablkcipher;

req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
if (!req_data) {
crypto_free_ablkcipher(ctr_tfm);
return -ENOMEM;
}
if (!req_data)
goto out_free_request;

memset(req_data->iv, 0, sizeof(req_data->iv));

/* Clear the data in the hash sub key container to zero.*/
@@ -913,8 +909,10 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
if (!ret)
ret = req_data->result.err;
}
ablkcipher_request_free(req);
kfree(req_data);
out_free_request:
ablkcipher_request_free(req);
out_free_ablkcipher:
crypto_free_ablkcipher(ctr_tfm);
return ret;
}

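Note: the rfc4106_set_hash_subkey() hunk above replaces three copy-pasted "free and return" branches with goto-based unwinding, the usual kernel error-handling idiom: each failure jumps to a label that frees exactly what was already acquired, and the success path falls through the same labels. A minimal sketch of the pattern; the names here (alloc_a, alloc_b, use, free_a, free_b) are hypothetical and not code from this commit:

	static int setup_example(void)
	{
		struct a *a;
		struct b *b;
		int ret = -ENOMEM;

		a = alloc_a();			/* first resource */
		if (!a)
			return -ENOMEM;
		b = alloc_b();			/* second resource */
		if (!b)
			goto out_free_a;	/* unwind only what exists */
		ret = use(a, b);
		free_b(b);			/* success also falls through */
	out_free_a:
		free_a(a);
		return ret;
	}
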
@@ -25,6 +25,8 @@
#define sysretl_audit ia32_ret_from_sys_call
#endif

.section .entry.text, "ax"

#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)

.macro IA32_ARG_FIXUP noebp=0
@@ -126,26 +128,20 @@ ENTRY(ia32_sysenter_target)
*/
ENABLE_INTERRUPTS(CLBR_NONE)
movl %ebp,%ebp /* zero extension */
pushq $__USER32_DS
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi $__USER32_DS
/*CFI_REL_OFFSET ss,0*/
pushq %rbp
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi %rbp
CFI_REL_OFFSET rsp,0
pushfq
CFI_ADJUST_CFA_OFFSET 8
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
CFI_REGISTER rip,r10
pushq $__USER32_CS
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi $__USER32_CS
/*CFI_REL_OFFSET cs,0*/
movl %eax, %eax
pushq %r10
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi %r10
CFI_REL_OFFSET rip,0
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi %rax
cld
SAVE_ARGS 0,0,1
/* no need to do an access_ok check here because rbp has been
@@ -182,11 +178,9 @@ sysexit_from_sys_call:
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
popfq
CFI_ADJUST_CFA_OFFSET -8
popfq_cfi
/*CFI_RESTORE rflags*/
popq %rcx /* User %esp */
CFI_ADJUST_CFA_OFFSET -8
popq_cfi %rcx /* User %esp */
CFI_REGISTER rsp,rcx
TRACE_IRQS_ON
ENABLE_INTERRUPTS_SYSEXIT32
@@ -421,8 +415,7 @@ ENTRY(ia32_syscall)
*/
ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%eax
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
pushq_cfi %rax
cld
/* note the registers are not zero extended to the sf.
this could be a problem. */
@@ -851,4 +844,8 @@ ia32_sys_call_table:
.quad sys_fanotify_init
.quad sys32_fanotify_mark
.quad sys_prlimit64 /* 340 */
.quad sys_name_to_handle_at
.quad compat_sys_open_by_handle_at
.quad compat_sys_clock_adjtime
.quad sys_syncfs
ia32_syscall_end:

@@ -29,6 +29,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/trampoline.h>

#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -116,7 +117,8 @@ static inline void acpi_disable_pci(void)
/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

extern unsigned long acpi_wakeup_address;
extern const unsigned char acpi_wakeup_code[];
#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))

/* early initialization routine */
extern void acpi_reserve_wakeup_memory(void);
@@ -185,15 +187,7 @@ struct bootnode;

#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
unsigned long end);
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)

#ifdef CONFIG_NUMA_EMU
extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
int num_nodes);
#endif
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */

#define acpi_unlazy_tlb(x) leave_mm(x)

@@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range {
u8 dev_limit;
};

extern struct pci_device_id amd_nb_misc_ids[];
extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
struct bootnode;

extern int early_is_amd_nb(u32 value);
extern bool early_is_amd_nb(u32 value);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int amd_scan_nodes(void);

#ifdef CONFIG_NUMA_EMU
extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
extern void amd_get_nodes(struct bootnode *nodes);
#endif
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, int);

struct amd_northbridge {
struct pci_dev *misc;
struct pci_dev *link;
};

struct amd_northbridge_info {
@@ -35,17 +32,18 @@ struct amd_northbridge_info {
};
extern struct amd_northbridge_info amd_northbridges;

#define AMD_NB_GART 0x1
#define AMD_NB_L3_INDEX_DISABLE 0x2
#define AMD_NB_GART BIT(0)
#define AMD_NB_L3_INDEX_DISABLE BIT(1)
#define AMD_NB_L3_PARTITIONING BIT(2)

#ifdef CONFIG_AMD_NB

static inline int amd_nb_num(void)
static inline u16 amd_nb_num(void)
{
return amd_northbridges.num;
}

static inline int amd_nb_has_feature(int feature)
static inline bool amd_nb_has_feature(unsigned feature)
{
return ((amd_northbridges.flags & feature) == feature);
}

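The amd_nb.h hunks above turn the AMD_NB_* flags into single BIT() masks and make amd_nb_has_feature() return bool, so a feature test is a plain mask check. Illustrative use only; the guard-then-call shape below is an assumption, not code from this commit:

	/* Query the L3 subcache assignment only when the northbridge
	 * supports partitioning; amd_nb_has_feature() tests one BIT() flag. */
	static int example_get_subcaches(int cpu)
	{
		if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
			return -ENODEV;
		return amd_get_subcaches(cpu);
	}
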
@@ -220,7 +220,6 @@ extern void enable_IR_x2apic(void);

extern int get_physical_broadcast(void);

extern void apic_disable(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -228,7 +227,6 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern int verify_local_APIC(void);
extern void cache_APIC_registers(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
@@ -239,8 +237,7 @@ void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern void enable_NMI_through_LVT0(void);
extern int apic_force_enable(void);
extern int apic_force_enable(unsigned long addr);

/*
* On 32bit this is mach-xxx local
@@ -261,7 +258,6 @@ static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
static inline void apic_disable(void) { }
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
#endif /* !CONFIG_X86_LOCAL_APIC */
@@ -307,8 +303,6 @@ struct apic {

void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
void (*setup_portio_remap)(void);
@@ -356,6 +350,23 @@ struct apic {
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);

#ifdef CONFIG_X86_32
/*
* Called very early during boot from get_smp_config(). It should
* return the logical apicid. x86_[bios]_cpu_to_apicid is
* initialized before this function is called.
*
* If logical apicid can't be determined that early, the function
* may return BAD_APICID. Logical apicid will be configured after
* init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
* won't be applied properly during early boot in this case.
*/
int (*x86_32_early_logical_apicid)(int cpu);

/* determine CPU -> NUMA node mapping */
int (*x86_32_numa_cpu_node)(int cpu);
#endif
};

/*
@@ -503,6 +514,11 @@ extern struct apic apic_noop;

extern struct apic apic_default;

static inline int noop_x86_32_early_logical_apicid(int cpu)
{
return BAD_APICID;
}

/*
* Set up the logical destination ID.
*
@@ -522,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}

extern int default_apicid_to_node(int logical_apicid);
extern int default_x86_32_numa_cpu_node(int cpu);

#endif

@@ -558,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
*retmap = *phys_map;
}

/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}

static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -596,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_32
extern u8 cpu_2_logical_apicid[NR_CPUS];
#endif

#endif /* _ASM_X86_APIC_H */

@@ -426,4 +426,16 @@ struct local_apic {
#else
#define BAD_APICID 0xFFFFu
#endif

enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};

#endif /* _ASM_X86_APICDEF_H */

@@ -12,6 +12,7 @@
/* setup data types */
#define SETUP_NONE 0
#define SETUP_E820_EXT 1
#define SETUP_DTB 2

/* extensible setup data list node */
struct setup_data {

@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
*
* Within a catagory, the attributes are mutually exclusive.
* Within a category, the attributes are mutually exclusive.
*
* The implementation of this API will take care of various aspects that
* are associated with changing such attributes, such as:

@@ -160,6 +160,7 @@
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */

/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)

#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1

@@ -151,6 +151,7 @@
#define DMA_AUTOINIT 0x10


#ifdef CONFIG_ISA_DMA_API
extern spinlock_t dma_spin_lock;

static inline unsigned long claim_dma_lock(void)
@@ -164,6 +165,7 @@ static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
#endif /* CONFIG_ISA_DMA_API */

/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
@@ -303,9 +305,11 @@ static inline int get_dma_residue(unsigned int dmanr)
}


/* These are in kernel/dma.c: */
/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
#ifdef CONFIG_ISA_DMA_API
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif

/* From PCI */

@@ -96,7 +96,7 @@ extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
extern void parse_e820_ext(struct setup_data *data);

#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))

@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)

.irpc idx, "01234567"
.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
BUILD_INTERRUPT3(invalidate_interrupt\idx,
(INVALIDATE_TLB_VECTOR_START)+\idx,
smp_invalidate_interrupt)
.endif
.endr
#endif

@@ -7,14 +7,12 @@
frame pointer later */
#ifdef CONFIG_FRAME_POINTER
.macro FRAME
pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
pushl_cfi %ebp
CFI_REL_OFFSET ebp,0
movl %esp,%ebp
.endm
.macro ENDFRAME
popl %ebp
CFI_ADJUST_CFA_OFFSET -4
popl_cfi %ebp
CFI_RESTORE ebp
.endm
#else

@@ -37,7 +37,7 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))

static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;

if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
return ret;
}

static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
int newval)
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
/* Real i386 machines have no cmpxchg instruction */
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
return -ENOSYS;
#endif

if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;

asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
"2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n"
"3:\tmov %3, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
_ASM_EXTABLE(1b, 3b)
: "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "1" (oldval)
: "memory"
);

return oldval;
*uval = oldval;
return ret;
}

#endif

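The futex_atomic_cmpxchg_inatomic() change above separates two results that used to share one return value: the fault/error code is now the function's return value, and the old futex word comes back through *uval. A hedged sketch of a caller under the new convention (example_cmpxchg and its -EAGAIN retry policy are illustrative, not from this commit):

	static int example_cmpxchg(u32 __user *uaddr, u32 expected, u32 newval)
	{
		u32 curval;
		int err;

		err = futex_atomic_cmpxchg_inatomic(&curval, uaddr,
						    expected, newval);
		if (err)
			return err;	/* -EFAULT or -ENOSYS, no value */
		/* no fault: decide from the value actually observed */
		return curval == expected ? 0 : -EAGAIN;
	}
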
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
extern void invalidate_interrupt5(void);
extern void invalidate_interrupt6(void);
extern void invalidate_interrupt7(void);
extern void invalidate_interrupt8(void);
extern void invalidate_interrupt9(void);
extern void invalidate_interrupt10(void);
extern void invalidate_interrupt11(void);
extern void invalidate_interrupt12(void);
extern void invalidate_interrupt13(void);
extern void invalidate_interrupt14(void);
extern void invalidate_interrupt15(void);
extern void invalidate_interrupt16(void);
extern void invalidate_interrupt17(void);
extern void invalidate_interrupt18(void);
extern void invalidate_interrupt19(void);
extern void invalidate_interrupt20(void);
extern void invalidate_interrupt21(void);
extern void invalidate_interrupt22(void);
extern void invalidate_interrupt23(void);
extern void invalidate_interrupt24(void);
extern void invalidate_interrupt25(void);
extern void invalidate_interrupt26(void);
extern void invalidate_interrupt27(void);
extern void invalidate_interrupt28(void);
extern void invalidate_interrupt29(void);
extern void invalidate_interrupt30(void);
extern void invalidate_interrupt31(void);

extern void irq_move_cleanup_interrupt(void);
extern void reboot_interrupt(void);

@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
unsigned long page_size_mask);


extern unsigned long __initdata e820_table_start;
extern unsigned long __meminitdata e820_table_end;
extern unsigned long __meminitdata e820_table_top;
extern unsigned long __initdata pgt_buf_start;
extern unsigned long __meminitdata pgt_buf_end;
extern unsigned long __meminitdata pgt_buf_top;

#endif /* _ASM_X86_INIT_32_H */

@@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
} __attribute__ ((packed)) bits;
};

enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};

struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
@@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry {
index : 15;
} __attribute__ ((packed));

#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1

#ifdef CONFIG_X86_IO_APIC

/*
@@ -150,11 +143,6 @@ extern int timer_through_8259;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)

extern u8 io_apic_unique_id(u8 id);
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);

struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_and_gsi_init(void);
extern void ioapic_insert_resources(void);

int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);

extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void);

extern void mp_save_irq(struct mpc_intsrc *m);

extern void disable_ioapic_support(void);

#else /* !CONFIG_X86_IO_APIC */

#define io_apic_assign_pci_irqs 0
@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr) { return 0; }

static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
return NULL;
}

static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { }
static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent)
{
return -ENOMEM;
}

static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { }
static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent)
{
return -ENOMEM;
}

static inline void mp_save_irq(struct mpc_intsrc *m) { };
static inline void disable_ioapic_support(void) { }
#endif

#endif /* _ASM_X86_IO_APIC_H */

@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);

/* Avoid include hell */
#define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
}

#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_allbutself(int vector);

@@ -10,9 +10,6 @@
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>

/* Even though we don't support this, supply it to appease OF */
static inline void irq_dispose_mapping(unsigned int virq) { }

static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);

arch/x86/include/asm/irq_controller.h (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef __IRQ_CONTROLLER__
#define __IRQ_CONTROLLER__

struct irq_domain {
int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
u32 *out_hwirq, u32 *out_type);
void *priv;
struct device_node *controller;
struct list_head l;
};

#endif

@@ -1,6 +1,7 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H

#include <linux/threads.h>
/*
* Linux IRQ vector layout.
*
@@ -16,8 +17,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
* Vectors 129 ... 237 : device interrupts
* Vectors 238 ... 255 : special interrupts
* Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
* Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
@@ -96,10 +97,25 @@
#define THRESHOLD_APIC_VECTOR 0xf9
#define REBOOT_VECTOR 0xf8

/* f0-f7 used for spreading out TLB flushes: */
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS 8
/*
* Generic system vector for platform specific use
*/
#define X86_PLATFORM_IPI_VECTOR 0xf7

/*
* IRQ work vector:
*/
#define IRQ_WORK_VECTOR 0xf6

#define UV_BAU_MESSAGE 0xf5

/*
* Self IPI vector for machine checks
*/
#define MCE_SELF_VECTOR 0xf4

/* Xen vector callback to receive events in a HVM domain */
#define XEN_HVM_EVTCHN_CALLBACK 0xf3

/*
* Local APIC timer IRQ vector is on a different priority level,
@@ -108,25 +124,16 @@
*/
#define LOCAL_TIMER_VECTOR 0xef

/*
* Generic system vector for platform specific use
*/
#define X86_PLATFORM_IPI_VECTOR 0xed
/* up to 32 vectors used for spreading out TLB flushes: */
#if NR_CPUS <= 32
# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS)
#else
# define NUM_INVALIDATE_TLB_VECTORS (32)
#endif

/*
* IRQ work vector:
*/
#define IRQ_WORK_VECTOR 0xec

#define UV_BAU_MESSAGE 0xea

/*
* Self IPI vector for machine checks
*/
#define MCE_SELF_VECTOR 0xeb

/* Xen vector callback to receive events in a HVM domain */
#define XEN_HVM_EVTCHN_CALLBACK 0xe9
#define INVALIDATE_TLB_VECTOR_END (0xee)
#define INVALIDATE_TLB_VECTOR_START \
(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)

#define NR_VECTORS 256

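A worked example of the new top-down vector allocation above, using only the macros shown: INVALIDATE_TLB_VECTOR_END is 0xee and the range grows downward, so

	NR_CPUS <= 32 (say 8):	NUM_INVALIDATE_TLB_VECTORS = 8
				INVALIDATE_TLB_VECTOR_START = 0xee - 8 + 1 = 0xe7
	NR_CPUS > 32:		NUM_INVALIDATE_TLB_VECTORS = 32
				INVALIDATE_TLB_VECTOR_START = 0xee - 32 + 1 = 0xcf

That is, at most vectors 0xcf..0xee are reserved for TLB flushes, sized by NR_CPUS, instead of the fixed 0xf0..0xf7 block of eight in the old layout.
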
@@ -13,7 +13,6 @@ enum die_val {
DIE_PANIC,
DIE_NMI,
DIE_DIE,
DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
@@ -27,7 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp);
unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs, int all);
extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);

@@ -142,9 +142,9 @@ struct x86_emulate_ops {
int (*pio_out_emulated)(int size, unsigned short port, const void *val,
unsigned int count, struct kvm_vcpu *vcpu);

bool (*get_cached_descriptor)(struct desc_struct *desc,
bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
int seg, struct kvm_vcpu *vcpu);
void (*set_cached_descriptor)(struct desc_struct *desc,
void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
int seg, struct kvm_vcpu *vcpu);
u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
@@ -239,6 +239,7 @@ struct x86_emulate_ctxt {
int interruptibility;

bool perm_ok; /* do not check permissions if true */
bool only_vendor_specific_insn;

bool have_exception;
struct x86_exception exception;

@@ -85,7 +85,7 @@

#define ASYNC_PF_PER_VCPU 64

extern spinlock_t kvm_lock;
extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
@@ -255,6 +255,8 @@ struct kvm_mmu {
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte, unsigned long mmu_seq);
hpa_t root_hpa;
int root_level;
int shadow_root_level;
@@ -335,12 +337,6 @@ struct kvm_vcpu_arch {
u64 *last_pte_updated;
gfn_t last_pte_gfn;

struct {
gfn_t gfn; /* presumed gfn during guest pte update */
pfn_t pfn; /* pfn corresponding to that gfn */
unsigned long mmu_seq;
} update_pte;

struct fpu guest_fpu;
u64 xcr0;

@@ -448,7 +444,7 @@ struct kvm_arch {

unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
spinlock_t tsc_write_lock;
raw_spinlock_t tsc_write_lock;
u64 last_tsc_nsec;
u64 last_tsc_offset;
u64 last_tsc_write;

@@ -25,7 +25,6 @@ extern int pic_mode;
#define MAX_IRQ_SOURCES 256

extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];

#ifdef CONFIG_X86_NUMAQ
extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif

#define MAX_APICID 256

#else /* CONFIG_X86_64: */

#define MAX_MP_BUSSES 256

@@ -43,6 +43,7 @@

#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e

#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
@@ -52,6 +53,9 @@
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b

#define MSR_OFFCORE_RSP_0 0x000001a6
#define MSR_OFFCORE_RSP_1 0x000001a7

#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345

@@ -7,7 +7,6 @@

#ifdef CONFIG_X86_LOCAL_APIC

extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
@@ -30,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
* external nmis, because the local ones are more frequent.
*
* Also setup some default high/normal/low settings for
* subsystems to registers with. Using 4 bits to seperate
* the priorities. This can go alot higher if needed be.
* subsystems to registers with. Using 4 bits to separate
* the priorities. This can go a lot higher if needed be.
*/

#define NMI_LOCAL_SHIFT 16 /* randomly picked */

@@ -38,7 +38,7 @@
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops
uses eax dependencies (arbitary choice)
uses eax dependencies (arbitrary choice)
1: nop
2: movl %eax,%eax
3: leal (,%eax,1),%eax

@@ -1,5 +1,57 @@
#ifndef _ASM_X86_NUMA_H
#define _ASM_X86_NUMA_H

#include <asm/topology.h>
#include <asm/apicdef.h>

#ifdef CONFIG_NUMA

#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)

/*
* __apicid_to_node[] stores the raw mapping between physical apicid and
* node and is used to initialize cpu_to_node mapping.
*
* The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
* should be accessed by the accessors - set_apicid_to_node() and
* numa_cpu_node().
*/
extern s16 __apicid_to_node[MAX_LOCAL_APIC];

static inline void set_apicid_to_node(int apicid, s16 node)
{
__apicid_to_node[apicid] = node;
}
#else /* CONFIG_NUMA */
static inline void set_apicid_to_node(int apicid, s16 node)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_X86_32
# include "numa_32.h"
#else
# include "numa_64.h"
#endif

#ifdef CONFIG_NUMA
extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu);
extern void __init numa_init_array(void);
extern void __init init_cpu_to_node(void);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void numa_init_array(void) { }
static inline void init_cpu_to_node(void) { }
static inline void numa_add_cpu(int cpu) { }
static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
#endif

#endif /* _ASM_X86_NUMA_H */

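The comment in the new numa.h above asks callers to go through the accessors rather than writing __apicid_to_node[] directly. A minimal illustration; example_bind and its argument values are hypothetical, not code from this commit:

	/* Record an SRAT-style apicid->node mapping, then bind the cpu. */
	static void __cpuinit example_bind(int cpu, int apicid, s16 node)
	{
		set_apicid_to_node(apicid, node);	/* raw table, via accessor */
		numa_set_node(cpu, node);		/* cpu -> node mapping */
	}
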
@@ -4,7 +4,12 @@
extern int numa_off;

extern int pxm_to_nid(int pxm);
extern void numa_remove_cpu(int cpu);

#ifdef CONFIG_NUMA
extern int __cpuinit numa_cpu_node(int cpu);
#else /* CONFIG_NUMA */
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
#endif /* CONFIG_NUMA */

#ifdef CONFIG_HIGHMEM
extern void set_highmem_pages_init(void);

@@ -2,23 +2,16 @@
#define _ASM_X86_NUMA_64_H

#include <linux/nodemask.h>
#include <asm/apicdef.h>

struct bootnode {
u64 start;
u64 end;
};

extern int compute_hash_shift(struct bootnode *nodes, int numblks,
int *nodeids);

#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))

extern void numa_init_array(void);
extern int numa_off;

extern s16 apicid_to_node[MAX_LOCAL_APIC];

extern unsigned long numa_free_all_bootmem(void);
extern void setup_node_bootmem(int nodeid, unsigned long start,
unsigned long end);
@@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
*/
#define NODE_MIN_SIZE (4*1024*1024)

extern void __init init_cpu_to_node(void);
extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
extern nodemask_t numa_nodes_parsed __initdata;

extern int __cpuinit numa_cpu_node(int cpu);
extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
extern void __init numa_set_distance(int from, int to, int distance);

#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
@@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */
#else
static inline void init_cpu_to_node(void) { }
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void numa_add_cpu(int cpu, int node) { }
static inline void numa_remove_cpu(int cpu) { }
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
#endif

#endif /* _ASM_X86_NUMA_64_H */

@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;

/*
* OLPC board IDs contain the major build number within the mask 0x0ff0,
* and the minor build number withing 0x000f. Pre-builds have a minor
* and the minor build number within 0x000f. Pre-builds have a minor
* number less than 8, and normal builds start at 8. For example, 0x0B10
* is a PreB1, and 0x0C18 is a C1.
*/

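The board-ID encoding documented above decodes with two mask-and-shift steps: major = (id & 0x0ff0) >> 4 and minor = id & 0x000f, so 0x0C18 gives major 0xC1 and minor 8 (a normal C1 build), while a minor below 8 marks a pre-build. A sketch with made-up helper names, not code from this commit:

	static inline unsigned int olpc_board_major(unsigned int id)
	{
		return (id & 0x0ff0) >> 4;	/* e.g. 0x0C18 -> 0xC1 */
	}

	static inline unsigned int olpc_board_minor(unsigned int id)
	{
		return id & 0x000f;		/* e.g. 0x0C18 -> 8 */
	}
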
@@ -6,7 +6,7 @@

#define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */

#ifdef CONFIG_OLPC_OPENFIRMWARE
#ifdef CONFIG_OLPC

extern bool olpc_ofw_is_installed(void);

@@ -26,19 +26,15 @@ extern void setup_olpc_ofw_pgd(void);
/* check if OFW was detected during boot */
extern bool olpc_ofw_present(void);

#else /* !CONFIG_OLPC_OPENFIRMWARE */

static inline bool olpc_ofw_is_installed(void) { return false; }
#else /* !CONFIG_OLPC */
static inline void olpc_ofw_detect(void) { }
static inline void setup_olpc_ofw_pgd(void) { }
static inline bool olpc_ofw_present(void) { return false; }
#endif /* !CONFIG_OLPC */

#endif /* !CONFIG_OLPC_OPENFIRMWARE */

#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
#ifdef CONFIG_OF_PROMTREE
extern void olpc_dt_build_devicetree(void);
#else
static inline void olpc_dt_build_devicetree(void) { }
#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
#endif

#endif /* _ASM_X86_OLPC_OFW_H */

@@ -2,6 +2,7 @@
#define _ASM_X86_PAGE_DEFS_H

#include <linux/const.h>
#include <linux/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
@@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

static inline phys_addr_t get_max_mapped(void)
{
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);

extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
int acpi, int k8);
extern void initmem_init(void);
extern void free_initmem(void);

#endif /* !__ASSEMBLY__ */

@@ -451,6 +451,26 @@ do { \
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \
({ \
char __ret; \
typeof(o1) __o1 = o1; \
typeof(o1) __n1 = n1; \
typeof(o2) __o2 = o2; \
typeof(o2) __n2 = n2; \
typeof(o2) __dummy = n2; \
asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
: "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \
: "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \
__ret; \
})

#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#endif /* CONFIG_X86_CMPXCHG64 */

/*
* Per cpu atomic 64 bit operations are only available under 64 bit.
* 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do { \
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)

/*
* Pretty complex macro to generate cmpxchg16 instruction. The instruction
* is not supported on early AMD64 processors so we must be able to emulate
* it in software. The address used in the cmpxchg16 instruction must be
* aligned to a 16 byte boundary.
*/
#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
({ \
char __ret; \
typeof(o1) __o1 = o1; \
typeof(o1) __n1 = n1; \
typeof(o2) __o2 = o2; \
typeof(o2) __n2 = n2; \
typeof(o2) __dummy; \
alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \
"cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t", \
X86_FEATURE_CX16, \
ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
"S" (&pcp1), "b"(__n1), "c"(__n2), \
"a"(__o1), "d"(__o2)); \
__ret; \
})

#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */

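The cmpxchg_double macros added above compare and replace two adjacent machine words of per-cpu data as one atomic operation (cmpxchg8b/cmpxchg16b). A rough usage sketch; the struct, variable, and generation-counter scheme are invented for illustration, and the generic this_cpu_cmpxchg_double() spelling is an assumption rather than something shown in this hunk:

	struct pair {
		unsigned long head;	/* the two fields must be adjacent */
		unsigned long gen;	/* and suitably aligned as a pair  */
	};
	static DEFINE_PER_CPU(struct pair, freelist);

	/* Replace (head, gen) only if both still hold the observed values. */
	static bool example_replace(unsigned long old_head, unsigned long old_gen,
				    unsigned long new_head)
	{
		return this_cpu_cmpxchg_double(freelist.head, freelist.gen,
					       old_head, old_gen,
					       new_head, old_gen + 1);
	}
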
@@ -1,5 +1,5 @@
/*
* Netburst Perfomance Events (P4, old Xeon)
* Netburst Performance Events (P4, old Xeon)
*/

#ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
#include <linux/bitops.h>

/*
* NetBurst has perfomance MSRs shared between
* NetBurst has performance MSRs shared between
* threads if HT is turned on, ie for both logical
* processors (mem: in turn in Atom with HT support
* perf-MSRs are not shared and every thread has its

@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd)

static inline void pud_clear(pud_t *pudp)
{
unsigned long pgd;

set_pud(pudp, __pud(0));

/*
@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp)
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*
* Make sure the pud entry we're updating is within the
* current pgd to avoid unnecessary TLB flushes.
* Currently all places where pud_clear() is called either have
* flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
* pud_clear_bad()), so we don't need TLB flush here.
*/
pgd = read_cr3();
if (__pa(pudp) >= pgd && __pa(pudp) <
(pgd + sizeof(pgd_t)*PTRS_PER_PGD))
write_cr3(pgd);
}

#ifdef CONFIG_SMP

@@ -7,7 +7,7 @@
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */

@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
int x86_cache_alignment; /* In bytes */
int x86_power;
unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
/* cpus sharing the last level cache: */
cpumask_var_t llc_shared_map;
#endif
/* cpuid returned max cores value: */
u16 x86_max_cores;
u16 apicid;

@@ -1 +1,69 @@
/* dummy prom.h; here to make linux/of.h's #includes happy */
/*
* Definitions for Device tree / OpenFirmware handling on X86
*
* based on arch/powerpc/include/asm/prom.h which is
* Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/

#ifndef _ASM_X86_PROM_H
#define _ASM_X86_PROM_H
#ifndef __ASSEMBLY__

#include <linux/of.h>
#include <linux/types.h>
#include <linux/pci.h>

#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/setup.h>
#include <asm/irq_controller.h>

#ifdef CONFIG_OF
extern int of_ioapic;
extern u64 initial_dtb;
extern void add_dtb(u64 data);
extern void x86_add_irq_domains(void);
void __cpuinit x86_of_pci_init(void);
void x86_dtb_init(void);

static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
return pdev ? pdev->dev.of_node : NULL;
}

static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
return pci_device_to_OF_node(bus->self);
}

#else
static inline void add_dtb(u64 data) { }
static inline void x86_add_irq_domains(void) { }
static inline void x86_of_pci_init(void) { }
static inline void x86_dtb_init(void) { }
#define of_ioapic 0
#endif

extern char cmd_line[COMMAND_LINE_SIZE];

#define pci_address_to_pio pci_address_to_pio
unsigned long pci_address_to_pio(phys_addr_t addr);

/**
* irq_dispose_mapping - Unmap an interrupt
* @virq: linux virq number of the interrupt to unmap
*
* FIXME: We really should implement proper virq handling like power,
* but that's going to be major surgery.
*/
static inline void irq_dispose_mapping(unsigned int virq) { }

#define HAVE_ARCH_DEVTREE_FIXUPS

#endif /* __ASSEMBLY__ */
#endif

@@ -31,7 +31,7 @@
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non tracing syscalls only save upto here*/
/* arguments: interrupts/non tracing syscalls only save up to here*/
#define R11 48
#define R10 56
#define R9 64

@@ -73,7 +73,7 @@ struct pt_regs {
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save upto here*/
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
unsigned long r12;
unsigned long bp;
unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save upto here*/
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;

@@ -18,7 +18,10 @@ extern struct machine_ops machine_ops;

void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
void machine_real_restart(const unsigned char *code, int length);
void machine_real_restart(unsigned int type);
/* These must match dispatch_table in reboot_32.S */
#define MRR_BIOS 0
#define MRR_APM 1

typedef void (*nmi_shootdown_cb)(int, struct die_args*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);

@@ -37,26 +37,9 @@
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <asm/asm.h>

struct rwsem_waiter;

extern asmregparm struct rw_semaphore *
rwsem_down_read_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
rwsem_down_write_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
rwsem_wake(struct rw_semaphore *);
extern asmregparm struct rw_semaphore *
rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
* the semaphore definition
*
* The bias values and the counter type limits the number of
* potential readers/writers to 32767 for 32 bits and 2147483647
* for 64 bits.
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

typedef signed long rwsem_count_t;

struct rw_semaphore {
rwsem_count_t count;
spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif


#define __RWSEM_INITIALIZER(name) \
{ \
RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
}

#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);

#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)

/*
* lock for reading
*/
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)
*/
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
rwsem_count_t result, tmp;
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
" mov %0,%1\n\t"
"1:\n\t"
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
rwsem_count_t tmp;
long tmp;
asm volatile("# beginning down_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
rwsem_count_t ret = cmpxchg(&sem->count,
RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
return 0;
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
*/
static inline void __up_read(struct rw_semaphore *sem)
{
rwsem_count_t tmp;
long tmp;
asm volatile("# beginning __up_read\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
rwsem_count_t tmp;
long tmp;
asm volatile("# beginning __up_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(rwsem_count_t delta,
struct rw_semaphore *sem)
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
/*
* implement exchange and add functionality
*/
static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
struct rw_semaphore *sem)
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
rwsem_count_t tmp = delta;
long tmp = delta;

asm volatile(LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
return tmp + delta;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */

@@ -1,14 +1,16 @@
#ifndef _ASM_X86_SEGMENT_H
#define _ASM_X86_SEGMENT_H

#include <linux/const.h>

/* Constructor for a conventional segment GDT (or LDT) entry */
/* This is a macro so it can be used in initializers */
#define GDT_ENTRY(flags, base, limit) \
((((base) & 0xff000000ULL) << (56-24)) | \
(((flags) & 0x0000f0ffULL) << 40) | \
(((limit) & 0x000f0000ULL) << (48-16)) | \
(((base) & 0x00ffffffULL) << 16) | \
(((limit) & 0x0000ffffULL)))
((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
(((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
(((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
(((base) & _AC(0x00ffffff,ULL)) << 16) | \
(((limit) & _AC(0x0000ffff,ULL))))
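
A worked example of the macro (a hypothetical standalone program, not part of the patch): it evaluates the big-real-mode code descriptor that the ACPI wakeup hunk further down builds with GDT_ENTRY(0x809b, ..., 0xfffff), using base 0 for readability:

    #include <stdio.h>
    #include <stdint.h>

    #define GDT_ENTRY(flags, base, limit)               \
            ((((base)  & 0xff000000ULL) << (56-24)) |   \
             (((flags) & 0x0000f0ffULL) << 40) |        \
             (((limit) & 0x000f0000ULL) << (48-16)) |   \
             (((base)  & 0x00ffffffULL) << 16) |        \
             (((limit) & 0x0000ffffULL)))

    int main(void)
    {
            /* flags 0x809b: granularity=1, 16-bit, present, code, exec/read */
            uint64_t desc = GDT_ENTRY(0x809b, 0x0, 0xfffff);

            printf("descriptor = %#018llx\n", (unsigned long long)desc);
            /* prints 0x008f9b000000ffff */
            return 0;
    }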

/* Simple and small GDT entries for booting only */

@@ -17,12 +17,24 @@
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/cpufeature.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

static inline bool cpu_has_ht_siblings(void)
{
bool has_siblings = false;
#ifdef CONFIG_SMP
has_siblings = cpu_has_ht && smp_num_siblings > 1;
#endif
return has_siblings;
}

DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);

@@ -36,8 +48,16 @@ static inline struct cpumask *cpu_core_mask(int cpu)
return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return per_cpu(cpu_llc_shared_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
#endif

/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */
@@ -47,7 +47,7 @@ struct stacktrace_ops {
};

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
unsigned long *stack,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data);

#ifdef CONFIG_X86_32
@@ -86,11 +86,11 @@ stack_frame(struct task_struct *task, struct pt_regs *regs)

extern void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl);
unsigned long *stack, unsigned long bp, char *log_lvl);

extern void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, char *log_lvl);
unsigned long *sp, unsigned long bp, char *log_lvl);

extern unsigned int code_bytes;

@@ -98,8 +98,6 @@ do { \
*/
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"

@@ -161,8 +161,14 @@ struct thread_info {

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#define alloc_thread_info(tsk) \
((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
#define alloc_thread_info_node(tsk, node) \
({ \
struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
THREAD_ORDER); \
struct thread_info *ret = page ? page_address(page) : NULL; \
\
ret; \
})

#ifdef CONFIG_X86_32
@@ -47,21 +47,6 @@

#include <asm/mpspec.h>

#ifdef CONFIG_X86_32

/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];

/* Returns the number of the node containing CPU 'cpu' */
static inline int __cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
#define early_cpu_to_node __cpu_to_node
#define cpu_to_node __cpu_to_node

#else /* CONFIG_X86_64 */

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

@@ -155,7 +138,7 @@ extern unsigned long node_remap_size[];
.balance_interval = 1, \
}

#ifdef CONFIG_X86_64_ACPI_NUMA
#ifdef CONFIG_X86_64
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif
@@ -3,25 +3,36 @@

#ifndef __ASSEMBLY__

#ifdef CONFIG_X86_TRAMPOLINE
#include <linux/types.h>
#include <asm/io.h>

/*
* Trampoline 80x86 program as an array.
* Trampoline 80x86 program as an array. These are in the init rodata
* segment, but that's okay, because we only care about the relative
* addresses of the symbols.
*/
extern const unsigned char trampoline_data [];
extern const unsigned char trampoline_end [];
extern unsigned char *trampoline_base;
extern const unsigned char x86_trampoline_start [];
extern const unsigned char x86_trampoline_end [];
extern unsigned char *x86_trampoline_base;

extern unsigned long init_rsp;
extern unsigned long initial_code;
extern unsigned long initial_gs;

#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
extern void __init setup_trampolines(void);

extern unsigned long setup_trampoline(void);
extern void __init reserve_trampoline_memory(void);
#else
static inline void reserve_trampoline_memory(void) {}
#endif /* CONFIG_X86_TRAMPOLINE */
extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_status[];

#define TRAMPOLINE_SYM(x) \
((void *)(x86_trampoline_base + \
((const unsigned char *)(x) - x86_trampoline_start)))

/* Address of the SMP trampoline */
static inline unsigned long trampoline_address(void)
{
return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
}

#endif /* __ASSEMBLY__ */
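
The relocation arithmetic in TRAMPOLINE_SYM() may be easier to see with numbers; a small sketch with hypothetical addresses (the real symbols are link-time constants):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical values, for illustration only */
            unsigned long start = 0xffffffff81e00000UL; /* x86_trampoline_start (linked) */
            unsigned long base  = 0x98000UL;            /* x86_trampoline_base (low-mem copy) */
            unsigned long sym   = start + 0x40;         /* e.g. trampoline_data */

            /* TRAMPOLINE_SYM(): keep the symbol's offset, rebase onto the copy */
            unsigned long reloc = base + (sym - start);

            printf("relocated: %#lx\n", reloc);         /* prints 0x98040 */
            return 0;
    }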

@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
static __always_inline cycles_t vget_cycles(void)
{
/*
* We only do VDSOs on TSC capable CPUs, so this shouldnt
* We only do VDSOs on TSC capable CPUs, so this shouldn't
* access boot_cpu_data (which is not VDSO-safe):
*/
#ifndef CONFIG_X86_TSC

@@ -1,20 +1,12 @@
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H

#define dma_addr_t dma_addr_t

#include <asm-generic/types.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

typedef u64 dma64_addr_t;
#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
/* DMA addresses come in 32-bit and 64-bit flavours. */
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
@@ -346,10 +346,14 @@
#define __NR_fanotify_init 338
#define __NR_fanotify_mark 339
#define __NR_prlimit64 340
#define __NR_name_to_handle_at 341
#define __NR_open_by_handle_at 342
#define __NR_clock_adjtime 343
#define __NR_syncfs 344

#ifdef __KERNEL__

#define NR_syscalls 341
#define NR_syscalls 345

#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR

@@ -669,6 +669,14 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_prlimit64 302
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_name_to_handle_at 303
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 304
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
#define __NR_clock_adjtime 305
__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
#define __NR_syncfs 306
__SYSCALL(__NR_syncfs, sys_syncfs)
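
A hedged usage sketch of the newly wired-up syncfs(2), invoked by raw number as userspace had to do before the libc grew a wrapper. The numbers match the hunks above (344 on 32-bit x86, 306 on x86-64); everything else is illustrative:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifdef __x86_64__
    # define NR_syncfs 306
    #else
    # define NR_syncfs 344
    #endif

    int main(void)
    {
            int fd = open(".", O_RDONLY);   /* any fd on the target filesystem */

            if (fd < 0 || syscall(NR_syncfs, fd) < 0)
                    perror("syncfs");
            else
                    printf("filesystem containing \".\" synced\n");
            return 0;
    }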

#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR

@@ -83,11 +83,13 @@ struct x86_init_paging {
* boot cpu
* @tsc_pre_init: platform function called before TSC init
* @timer_init: initialize the platform timer (default PIT/HPET)
* @wallclock_init: init the wallclock device
*/
struct x86_init_timers {
void (*setup_percpu_clockev)(void);
void (*tsc_pre_init)(void);
void (*timer_init)(void);
void (*wallclock_init)(void);
};

/**

@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
return _hypercall2(int, sched_op_new, cmd, arg);
return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
@@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
#endif

static inline int
HYPERVISOR_suspend(unsigned long srec)
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
return _hypercall3(int, sched_op, SCHEDOP_shutdown,
SHUTDOWN_suspend, srec);
struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

/*
* For a PV guest the tools require that the start_info mfn be
* present in rdx/edx when the hypercall is made. Per the
* hypercall calling convention this is the third hypercall
* argument, which is start_info_mfn here.
*/
return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
* The privilege level specifies which modes may enter a trap via a software
* interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
* privilege levels as follows:
* Level == 0: Noone may enter
* Level == 0: No one may enter
* Level == 1: Kernel may enter
* Level == 2: Kernel may enter
* Level == 3: Everyone may enter

@@ -29,8 +29,10 @@ typedef struct xpaddr {

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES \
@@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);

extern int m2p_add_override(unsigned long mfn, struct page *page);
extern int m2p_remove_override(struct page *page);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

#ifdef CONFIG_XEN_DEBUG_FS
extern int p2m_dump_show(struct seq_file *m, void *v);
#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
mfn = get_phys_to_machine(pfn);

if (mfn != INVALID_P2M_ENTRY)
mfn &= ~FOREIGN_FRAME_BIT;
mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

return mfn;
}
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
int ret = 0;

if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;

if (unlikely((mfn >> machine_to_phys_order) != 0)) {
pfn = ~0;
goto try_override;
}
pfn = 0;
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
__get_user(pfn, &machine_to_phys_mapping[mfn]);
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
try_override:
/* ret might be < 0 if there are no entries in the m2p for mfn */
if (ret < 0)
pfn = ~0;
else if (get_phys_to_machine(pfn) != mfn)
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
* table to see if there's a better pfn to use.
*
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);

/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
* table to see if there's a better pfn to use.
/*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
*/
if (get_phys_to_machine(pfn) != mfn)
pfn = m2p_find_override_pfn(mfn, pfn);
if (pfn == ~0 &&
get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
pfn = mfn;

return pfn;
}
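
The foreign-frame check above ("does the pfn map back to the mfn?") can be modelled in a few lines; a toy sketch with made-up lookup tables, purely illustrative:

    #include <stdio.h>

    /* hypothetical tables: p2m[pfn] = mfn, m2p[mfn] = pfn */
    static unsigned long p2m[8]  = { 7, 3, 9, 1, 4, 2, 6, 8 };
    static unsigned long m2p[16] = { [3] = 1, [7] = 0, [9] = 5 }; /* m2p[9] is stale */

    int main(void)
    {
            unsigned long mfns[] = { 3, 9 };

            for (int i = 0; i < 2; i++) {
                    unsigned long mfn = mfns[i];
                    unsigned long pfn = m2p[mfn];

                    if (p2m[pfn] == mfn)    /* maps back: a local page */
                            printf("mfn %lu -> pfn %lu (local)\n", mfn, pfn);
                    else                    /* does not: foreign, try the override */
                            printf("mfn %lu looks foreign; consult m2p_override\n", mfn);
            }
            return 0;
    }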

@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
* its own functions.
*/
struct xen_pci_frontend_ops {
int (*enable_msi)(struct pci_dev *dev, int **vectors);
int (*enable_msi)(struct pci_dev *dev, int vectors[]);
void (*disable_msi)(struct pci_dev *dev);
int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
void (*disable_msix)(struct pci_dev *dev);
};

extern struct xen_pci_frontend_ops *xen_pci_frontend;

static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
int **vectors)
int vectors[])
{
if (xen_pci_frontend && xen_pci_frontend->enable_msi)
return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
xen_pci_frontend->disable_msi(dev);
}
static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
int **vectors, int nvec)
int vectors[], int nvec)
{
if (xen_pci_frontend && xen_pci_frontend->enable_msix)
return xen_pci_frontend->enable_msix(dev, vectors, nvec);
@@ -41,13 +41,13 @@ obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o

obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += trampoline.o trampoline_$(BITS).o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
@@ -55,10 +55,12 @@ obj-$(CONFIG_X86_32) += tls.o
obj-$(CONFIG_IA32_EMULATION) += tls.o
obj-y += step.o
obj-$(CONFIG_INTEL_TXT) += tboot.o
obj-$(CONFIG_ISA_DMA_API) += i8237.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
obj-y += reboot.o
obj-$(CONFIG_X86_32) += reboot_32.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
@@ -66,10 +68,9 @@ obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
@@ -109,6 +110,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o

obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o

###
# 64 bit specific files
@@ -595,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
nid = acpi_get_node(handle);
if (nid == -1 || !node_online(nid))
return;
#ifdef CONFIG_X86_64
apicid_to_node[physid] = nid;
set_apicid_to_node(physid, nid);
numa_set_node(cpu, nid);
#else /* CONFIG_X86_32 */
apicid_2_node[physid] = nid;
cpu_to_node_map[cpu] = nid;
#endif

#endif
}
@@ -6,11 +6,17 @@
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
#include "wakeup.h"

.code16
.section ".header", "a"
.section ".jump", "ax"
.globl _start
_start:
cli
jmp wakeup_code

/* This should match the structure in wakeup.h */
.section ".header", "a"
.globl wakeup_header
wakeup_header:
video_mode: .short 0 /* Video mode number */
@@ -30,14 +36,11 @@ wakeup_jmp: .byte 0xea /* ljmpw */
wakeup_jmp_off: .word 3f
wakeup_jmp_seg: .word 0
wakeup_gdt: .quad 0, 0, 0
signature: .long 0x51ee1111
signature: .long WAKEUP_HEADER_SIGNATURE

.text
.globl _start
.code16
wakeup_code:
_start:
cli
cld

/* Apparently some dimwit BIOS programmers don't know how to
@@ -77,12 +80,12 @@ _start:

/* Check header signature... */
movl signature, %eax
cmpl $0x51ee1111, %eax
cmpl $WAKEUP_HEADER_SIGNATURE, %eax
jne bogus_real_magic

/* Check we really have everything... */
movl end_signature, %eax
cmpl $0x65a22c82, %eax
cmpl $WAKEUP_END_SIGNATURE, %eax
jne bogus_real_magic

/* Call the C code */
@@ -147,3 +150,7 @@ wakeup_heap:
wakeup_stack:
.space 2048
wakeup_stack_end:

.section ".signature","a"
end_signature:
.long WAKEUP_END_SIGNATURE

@@ -35,7 +35,8 @@ struct wakeup_header {
extern struct wakeup_header wakeup_header;
#endif

#define HEADER_OFFSET 0x3f00
#define WAKEUP_SIZE 0x4000
#define WAKEUP_HEADER_OFFSET 8
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
#define WAKEUP_END_SIGNATURE 0x65a22c82

#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
@@ -13,9 +13,19 @@ ENTRY(_start)
SECTIONS
{
. = 0;
.jump : {
*(.jump)
} = 0x90909090

. = WAKEUP_HEADER_OFFSET;
.header : {
*(.header)
}

. = ALIGN(16);
.text : {
*(.text*)
}
} = 0x90909090

. = ALIGN(16);
.rodata : {
@@ -33,11 +43,6 @@ SECTIONS
*(.data*)
}

.signature : {
end_signature = .;
LONG(0x65a22c82)
}

. = ALIGN(16);
.bss : {
__bss_start = .;
@@ -45,20 +50,13 @@ SECTIONS
__bss_end = .;
}

. = HEADER_OFFSET;
.header : {
*(.header)
.signature : {
*(.signature)
}

. = ALIGN(16);
_end = .;

/DISCARD/ : {
*(.note*)
}

/*
* The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/
. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
}
@@ -18,12 +18,8 @@
#include "realmode/wakeup.h"
#include "sleep.h"

unsigned long acpi_wakeup_address;
unsigned long acpi_realmode_flags;

/* address in low memory of the wakeup routine. */
static unsigned long acpi_realmode;

#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
static char temp_stack[4096];
#endif
@@ -33,22 +29,17 @@ static char temp_stack[4096];
*
* Create an identity mapped page table and copy the wakeup routine to
* low memory.
*
* Note that this is too late to change acpi_wakeup_address.
*/
int acpi_suspend_lowlevel(void)
{
struct wakeup_header *header;
/* address in low memory of the wakeup routine. */
char *acpi_realmode;

if (!acpi_realmode) {
printk(KERN_ERR "Could not allocate memory during boot, "
"S3 disabled\n");
return -ENOMEM;
}
memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);
acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);

header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
if (header->signature != 0x51ee1111) {
header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
}
@@ -68,9 +59,7 @@ int acpi_suspend_lowlevel(void)
/* GDT[0]: GDT self-pointer */
header->wakeup_gdt[0] =
(u64)(sizeof(header->wakeup_gdt) - 1) +
((u64)(acpi_wakeup_address +
((char *)&header->wakeup_gdt - (char *)acpi_realmode))
<< 16);
((u64)__pa(&header->wakeup_gdt) << 16);
/* GDT[1]: big real mode-like code segment */
header->wakeup_gdt[1] =
GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
@@ -96,7 +85,7 @@ int acpi_suspend_lowlevel(void)
header->pmode_cr3 = (u32)__pa(&initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4;
header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
@@ -111,45 +100,6 @@ int acpi_suspend_lowlevel(void)
return 0;
}

/**
* acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation
*
* We allocate a page from the first 1MB of memory for the wakeup
* routine for when we come back from a sleep state. The
* runtime allocator allows specification of <16MB pages, but not
* <1MB pages.
*/
void __init acpi_reserve_wakeup_memory(void)
{
phys_addr_t mem;

if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
printk(KERN_ERR
"ACPI: Wakeup code way too big, S3 disabled.\n");
return;
}

mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);

if (mem == MEMBLOCK_ERROR) {
printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
return;
}
acpi_realmode = (unsigned long) phys_to_virt(mem);
acpi_wakeup_address = mem;
memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
}

int __init acpi_configure_wakeup_memory(void)
{
if (acpi_realmode)
set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);

return 0;
}
arch_initcall(acpi_configure_wakeup_memory);


static int __init acpi_sleep_setup(char *str)
{
while ((str != NULL) && (*str != '\0')) {
@@ -4,13 +4,10 @@

#include <asm/trampoline.h>

extern char wakeup_code_start, wakeup_code_end;

extern unsigned long saved_video_mode;
extern long saved_magic;

extern int wakeup_pmode_return;
extern char swsusp_pg_dir[PAGE_SIZE];

extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);

@@ -2,9 +2,11 @@
* Wrapper script for the realmode binary as a transport object
* before copying to low memory.
*/
.section ".rodata","a"
.globl wakeup_code_start, wakeup_code_end
wakeup_code_start:
#include <asm/page_types.h>

.section ".x86_trampoline","a"
.balign PAGE_SIZE
.globl acpi_wakeup_code
acpi_wakeup_code:
.incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
wakeup_code_end:
.size wakeup_code_start, .-wakeup_code_start
.size acpi_wakeup_code, .-acpi_wakeup_code
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
self modifying code. This implies that assymetric systems where
self modifying code. This implies that asymmetric systems where
APs have less capabilities than the boot processor are not handled.
Tough. Make sure you disable such features by hand. */

@@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data)
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + p->len);
}

/*
* Intel Architecture Software Developer's Manual section 7.1.3 specifies
* that a core serializing instruction such as "cpuid" should be
* executed on _each_ core before the new instruction is made visible.
*/
sync_core();
return 0;
}
@@ -12,14 +12,19 @@

static u32 *flush_words;

struct pci_device_id amd_nb_misc_ids[] = {
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ 0x00, 0x18, 0x20 },
{ 0xff, 0x00, 0x20 },
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
struct pci_device_id *ids)
const struct pci_device_id *ids)
{
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
@@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,

int amd_cache_northbridges(void)
{
int i = 0;
u16 i = 0;
struct amd_northbridge *nb;
struct pci_dev *misc;
struct pci_dev *misc, *link;

if (amd_nb_num())
return 0;
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void)
amd_northbridges.nb = nb;
amd_northbridges.num = i;

misc = NULL;
link = misc = NULL;
for (i = 0; i != amd_nb_num(); i++) {
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
next_northbridge(link, amd_nb_link_ids);
}

/* some CPU families (e.g. family 0x11) do not support GART */
@@ -85,26 +92,95 @@ int amd_cache_northbridges(void)
boot_cpu_data.x86_mask >= 0x1))
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

/* L3 cache partitioning is supported on family 0x15 */
if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/* Ignores subdevice/subvendor but as far as I can figure out
they're useless anyways */
int __init early_is_amd_nb(u32 device)
/*
* Ignores subdevice/subvendor but as far as I can figure out
* they're useless anyways
*/
bool __init early_is_amd_nb(u32 device)
{
struct pci_device_id *id;
const struct pci_device_id *id;
u32 vendor = device & 0xffff;

device >>= 16;
for (id = amd_nb_misc_ids; id->vendor; id++)
if (vendor == id->vendor && device == id->device)
return 1;
return true;
return false;
}

int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
unsigned int mask;
int cuid = 0;

if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
return 0;

pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
#endif
return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
unsigned int reg;
int cuid = 0;

if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
return -EINVAL;

/* if necessary, collect reset state of L3 partitioning and BAN mode */
if (reset == 0) {
pci_read_config_dword(nb->link, 0x1d4, &reset);
pci_read_config_dword(nb->misc, 0x1b8, &ban);
ban &= 0x180000;
}

/* deactivate BAN mode if any subcaches are to be disabled */
if (mask != 0xf) {
pci_read_config_dword(nb->misc, 0x1b8, &reg);
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}

#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
#endif
mask <<= 4 * cuid;
mask |= (0xf ^ (1 << cuid)) << 26;

pci_write_config_dword(nb->link, 0x1d4, mask);

/* reset BAN mode if L3 partitioning returned to reset state */
pci_read_config_dword(nb->link, 0x1d4, &reg);
if (reg == reset) {
pci_read_config_dword(nb->misc, 0x1b8, &reg);
reg &= ~0x180000;
pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
}

return 0;
}
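
A worked example of the subcache mask arithmetic above (with hypothetical inputs): enabling subcaches 0 and 1 (mask 0x3) from compute unit 1:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 0x3;    /* subcaches to enable */
            int cuid = 1;               /* compute unit id */

            mask <<= 4 * cuid;                 /* 0x3 -> 0x30: this unit's nibble */
            mask |= (0xf ^ (1 << cuid)) << 26; /* 0xd << 26: flag the other units */

            printf("register value: %#x\n", mask);  /* prints 0x34000030 */
            return 0;
    }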

int amd_cache_gart(void)
static int amd_cache_gart(void)
{
int i;
u16 i;

if (!amd_nb_has_feature(AMD_NB_GART))
return 0;
@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta,
return 0;
}

/*
* APB timer clock is not in sync with pclk on Langwell, which translates to
* unreliable read value caused by sampling error. the error does not add up
* over time and only happens when sampling a 0 as a 1 by mistake. so the time
* would go backwards. the following code is trying to prevent time traveling
* backwards. little bit paranoid.
*/
static cycle_t apbt_read_clocksource(struct clocksource *cs)
{
unsigned long t0, t1, t2;
static unsigned long last_read;
unsigned long current_count;

bad_count:
t1 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
t2 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
if (unlikely(t1 < t2)) {
pr_debug("APBT: read current count error %lx:%lx:%lx\n",
t1, t2, t2 - t1);
goto bad_count;
}
/*
* check against cached last read, makes sure time does not go back.
* it could be a normal rollover but we will do triple check anyway
*/
if (unlikely(t2 > last_read)) {
/* check if we have a normal rollover */
unsigned long raw_intr_status =
apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
/*
* cs timer interrupt is masked but raw intr bit is set if
* rollover occurs. then we read EOI reg to clear it.
*/
if (raw_intr_status & (1 << phy_cs_timer_id)) {
apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
goto out;
}
pr_debug("APB CS going back %lx:%lx:%lx ",
t2, last_read, t2 - last_read);
bad_count_x3:
pr_debug("triple check enforced\n");
t0 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
udelay(1);
t1 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
udelay(1);
t2 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
if ((t2 > t1) || (t1 > t0)) {
printk(KERN_ERR "Error: APB CS triple check failed\n");
goto bad_count_x3;
}
}
out:
last_read = t2;
return (cycle_t)~t2;
current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
return (cycle_t)~current_count;
}
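
A minimal sketch of the "read twice, retry if the count moved the wrong way" pattern that the removed code implemented, with a stub counter standing in for the APB timer register (everything here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_hw_count(void)
    {
            static uint32_t fake = 1000;
            return fake -= 7;       /* a down-counting timer */
    }

    static uint32_t read_clocksource_paranoid(void)
    {
            uint32_t t1, t2;

            do {
                    t1 = read_hw_count();
                    t2 = read_hw_count();
            } while (t1 < t2);      /* down-counter: t2 may not exceed t1 */
            return ~t2;             /* invert so the clocksource counts up */
    }

    int main(void)
    {
            printf("sample: %u\n", read_clocksource_paranoid());
            return 0;
    }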

static int apbt_clocksource_register(void)
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
static u32 __init allocate_aperture(void)
{
u32 aper_size;
void *p;
unsigned long addr;

/* aper_size should be <= 1G */
if (fallback_aper_order > 5)
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
/*
* using 512M as goal, in case kexec will load kernel_big
* that will do the on position decompress, and could overlap with
* that positon with gart that is used.
* that position with gart that is used.
* sequence:
* kernel_small
* ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
* so don't use 512M below as gart iommu, leave the space for kernel
* code for safe
*/
p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%lx,%uK)\n",
addr, aper_size>>10);
return 0;
}
memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
*/
kmemleak_ignore(p);
if (!p || __pa(p)+aper_size > 0xffffffff) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%p,%uK)\n",
p, aper_size>>10);
if (p)
free_bootmem(__pa(p), aper_size);
return 0;
}
kmemleak_ignore(phys_to_virt(addr));
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, __pa(p));
insert_aperture_resource((u32)__pa(p), aper_size);
register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
(u32)__pa(p+aper_size) >> PAGE_SHIFT);
aper_size >> 10, addr);
insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);

return (u32)addr;
}
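
The find-then-reserve idiom that replaces the bootmem allocation here can be modelled in userspace; a toy sketch (the real memblock API is kernel-internal, and the single reservation slot is a deliberate simplification):

    #include <stdio.h>
    #include <stdbool.h>

    #define TOY_ERROR (~0UL)

    /* one reservation slot is enough for the demonstration */
    static unsigned long rsv_base = TOY_ERROR, rsv_size;

    static bool overlaps(unsigned long base, unsigned long size)
    {
            return rsv_base != TOY_ERROR &&
                   base < rsv_base + rsv_size && rsv_base < base + size;
    }

    static unsigned long find_in_range(unsigned long start, unsigned long end,
                                       unsigned long size, unsigned long align)
    {
            for (unsigned long base = (start + align - 1) & ~(align - 1);
                 base + size <= end; base += align)
                    if (!overlaps(base, size))
                            return base;
            return TOY_ERROR;
    }

    int main(void)
    {
            /* pretend the kernel image owns the first 512 MB */
            rsv_base = 0;
            rsv_size = 512UL << 20;

            /* like allocate_aperture(): 64 MB, 512 MB aligned, below 4 GB */
            unsigned long addr = find_in_range(0, 1UL << 32, 64UL << 20,
                                               512UL << 20);

            if (addr == TOY_ERROR)
                    return 1;
            printf("aperture hole at %#lx\n", addr);  /* prints 0x20000000 */
            return 0;
    }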
@@ -43,6 +43,7 @@
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/idle.h>
@@ -78,12 +79,21 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#ifdef CONFIG_X86_32

/*
* On x86_32, the mapping between cpu and logical apicid may vary
* depending on apic in use. The following early percpu variable is
* used for the mapping. This is where the behaviors of x86_64 and 32
* actually diverge. Let's keep it ugly for now.
*/
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);

/*
* Knob to control our willingness to enable the local APIC.
*
* +1=force-enable
*/
static int force_enable_local_apic;
static int force_enable_local_apic __initdata;
/*
* APIC command line parameters
*/
@@ -153,7 +163,7 @@ early_param("nox2apic", setup_nox2apic);
unsigned long mp_lapic_addr;
int disable_apic;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __cpuinitdata;
static int disable_apic_timer __initdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -177,29 +187,8 @@ static struct resource lapic_resource = {

static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
struct clock_event_device *evt);
static void lapic_timer_broadcast(const struct cpumask *mask);
static void apic_pm_activate(void);

/*
* The local apic timer can be used for any function which is CPU local.
*/
static struct clock_event_device lapic_clockevent = {
.name = "lapic",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
.shift = 32,
.set_mode = lapic_timer_setup,
.set_next_event = lapic_next_event,
.broadcast = lapic_timer_broadcast,
.rating = 100,
.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

static unsigned long apic_phys;

/*
@@ -238,7 +227,7 @@ static int modern_apic(void)
* right after this call apic become NOOP driven
* so apic->write/read doesn't do anything
*/
void apic_disable(void)
static void __init apic_disable(void)
{
pr_info("APIC: switched to apic NOOP\n");
apic = &apic_noop;
@@ -282,23 +271,6 @@ u64 native_apic_icr_read(void)
return icr1 | ((u64)icr2 << 32);
}

/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
void __cpuinit enable_NMI_through_LVT0(void)
{
unsigned int v;

/* unmask and set to NMI */
v = APIC_DM_NMI;

/* Level triggered for 82489DX (32bit mode) */
if (!lapic_is_integrated())
v |= APIC_LVT_LEVEL_TRIGGER;

apic_write(APIC_LVT0, v);
}

#ifdef CONFIG_X86_32
/**
* get_physical_broadcast - Get number of physical broadcast IDs
@@ -508,6 +480,23 @@ static void lapic_timer_broadcast(const struct cpumask *mask)
#endif
}


/*
* The local apic timer can be used for any function which is CPU local.
*/
static struct clock_event_device lapic_clockevent = {
.name = "lapic",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
.shift = 32,
.set_mode = lapic_timer_setup,
.set_next_event = lapic_next_event,
.broadcast = lapic_timer_broadcast,
.rating = 100,
.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/*
* Setup the local APIC timer for this CPU. Copy the initialized values
* of the boot CPU and register the clock event in the framework.
@@ -1209,7 +1198,7 @@ void __cpuinit setup_local_APIC(void)
rdtscll(tsc);

if (disable_apic) {
arch_disable_smp_support();
disable_ioapic_support();
return;
}

@@ -1237,6 +1226,19 @@ void __cpuinit setup_local_APIC(void)
*/
apic->init_apic_ldr();

#ifdef CONFIG_X86_32
/*
* APIC LDR is initialized. If logical_apicid mapping was
* initialized during get_smp_config(), make sure it matches the
* actual value.
*/
i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
/* always use the value from LDR */
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
logical_smp_processor_id();
#endif

/*
* Set Task Priority to 'accept all'. We never change this
* later on.
@@ -1448,7 +1450,7 @@ int __init enable_IR(void)
void __init enable_IR_x2apic(void)
{
unsigned long flags;
struct IO_APIC_route_entry **ioapic_entries = NULL;
struct IO_APIC_route_entry **ioapic_entries;
int ret, x2apic_enabled = 0;
int dmar_table_init_ret;

@@ -1537,7 +1539,7 @@ static int __init detect_init_APIC(void)
}
#else

static int apic_verify(void)
static int __init apic_verify(void)
{
u32 features, h, l;

@@ -1562,7 +1564,7 @@ static int apic_verify(void)
return 0;
}

int apic_force_enable(void)
int __init apic_force_enable(unsigned long addr)
{
u32 h, l;

@@ -1578,7 +1580,7 @@ int apic_force_enable(void)
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
l |= MSR_IA32_APICBASE_ENABLE | addr;
wrmsr(MSR_IA32_APICBASE, l, h);
enabled_via_apicbase = 1;
}
@@ -1619,7 +1621,7 @@ static int __init detect_init_APIC(void)
"you can enable it with \"lapic\"\n");
return -1;
}
if (apic_force_enable())
if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
return -1;
} else {
if (apic_verify())
@@ -1930,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
{
int cpu;

/*
* Validate version
*/
if (version == 0x0) {
pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
"fixing up to 0x10. (tell your hw vendor)\n",
version);
version = 0x10;
}
apic_version[apicid] = version;

if (num_processors >= nr_cpu_ids) {
int max = nr_cpu_ids;
int thiscpu = max + disabled_cpus;
@@ -1954,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version)
}

num_processors++;
cpu = cpumask_next_zero(-1, cpu_present_mask);

if (version != apic_version[boot_cpu_physical_apicid])
WARN_ONCE(1,
"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
apic_version[boot_cpu_physical_apicid], cpu, version);

physid_set(apicid, phys_cpu_present_map);
if (apicid == boot_cpu_physical_apicid) {
/*
* x86_bios_cpu_apicid is required to have processors listed
* in same order as logical cpu numbers. Hence the first
* entry is BSP, and so on.
* boot_cpu_init() already hold bit 0 in cpu_present_mask
* for BSP.
*/
cpu = 0;
} else
cpu = cpumask_next_zero(-1, cpu_present_mask);

/*
* Validate version
*/
if (version == 0x0) {
pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
cpu, apicid);
version = 0x10;
}
apic_version[apicid] = version;

if (version != apic_version[boot_cpu_physical_apicid]) {
pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
apic_version[boot_cpu_physical_apicid], cpu, version);
}

physid_set(apicid, phys_cpu_present_map);
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;

@@ -1977,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif

#ifdef CONFIG_X86_32
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
}
@@ -1998,10 +2004,14 @@ void default_init_apic_ldr(void)
}

#ifdef CONFIG_X86_32
int default_apicid_to_node(int logical_apicid)
int default_x86_32_numa_cpu_node(int cpu)
{
#ifdef CONFIG_SMP
return apicid_2_node[hard_smp_processor_id()];
#ifdef CONFIG_NUMA
int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

if (apicid != BAD_APICID)
return __apicid_to_node[apicid];
return NUMA_NO_NODE;
#else
return 0;
#endif
@@ -185,8 +185,6 @@ struct apic apic_flat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,
@@ -337,8 +335,6 @@ struct apic apic_physflat = {
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = NULL,
.cpu_to_logical_apicid = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = NULL,
.setup_portio_remap = NULL,

@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
return 0;
}

static int noop_cpu_to_logical_apicid(int cpu)
{
return 0;
}

static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
return 0;
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask);
}

int noop_apicid_to_node(int logical_apicid)
{
/* we're always on node 0 */
return 0;
}

static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE((cpu_has_apic && !disable_apic));
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}

#ifdef CONFIG_X86_32
static int noop_x86_32_numa_cpu_node(int cpu)
{
/* we're always on node 0 */
return 0;
}
#endif

struct apic apic_noop = {
.name = "noop",
.probe = noop_probe,
@@ -153,9 +150,7 @@ struct apic apic_noop = {
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = noop_apicid_to_node,

.cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,

@@ -197,4 +192,9 @@ struct apic apic_noop = {
.icr_write = noop_apic_icr_write,
.wait_icr_idle = noop_apic_wait_icr_idle,
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,

#ifdef CONFIG_X86_32
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
.x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node,
#endif
};
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
/* on bigsmp, logical apicid is the same as physical */
return early_per_cpu(x86_cpu_to_apicid, cpu);
}

static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
nr_ioapics);
}

static int bigsmp_apicid_to_node(int logical_apicid)
{
return apicid_2_node[hard_smp_processor_id()];
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
return BAD_APICID;
}

/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_physical_id(cpu);
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
int cpu = cpumask_first(cpumask);

if (cpu < nr_cpu_ids)
return cpu_physical_id(cpu);
return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
*/
for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
return cpu_physical_id(cpu);
}
return bigsmp_cpu_to_logical_apicid(cpu);
return BAD_APICID;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
.setup_apic_routing = bigsmp_setup_apic_routing,
.multi_timer_check = NULL,
.apicid_to_node = bigsmp_apicid_to_node,
.cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
|
||||
return physid_isset(bit, phys_cpu_present_map);
|
||||
}
|
||||
|
||||
static int es7000_early_logical_apicid(int cpu)
|
||||
{
|
||||
/* on es7000, logical apicid is the same as physical */
|
||||
return early_per_cpu(x86_bios_cpu_apicid, cpu);
|
||||
}
|
||||
|
||||
static unsigned long calculate_ldr(int cpu)
|
||||
{
|
||||
unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
|
||||
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
|
||||
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
|
||||
}
|
||||
|
||||
static int es7000_apicid_to_node(int logical_apicid)
|
||||
static int es7000_numa_cpu_node(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int es7000_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
if (!mps_cpu)
|
||||
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
|
||||
++cpu_id;
|
||||
}
|
||||
|
||||
/* Mapping from cpu number to logical apicid */
|
||||
static int es7000_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return BAD_APICID;
|
||||
return cpu_2_logical_apicid[cpu];
|
||||
#else
|
||||
return logical_smp_processor_id();
|
||||
#endif
|
||||
}
|
||||
|
||||
static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
|
||||
{
|
||||
/* For clustered we don't have a good way to do this yet - hack */
|
||||
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
* The cpus in the mask must all be on the apic cluster.
|
||||
*/
|
||||
for_each_cpu(cpu, cpumask) {
|
||||
int new_apicid = es7000_cpu_to_logical_apicid(cpu);
|
||||
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
|
||||
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
|
||||
WARN(1, "Not a valid mask!");
|
||||
@@ -578,7 +571,7 @@ static unsigned int
|
||||
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
|
||||
const struct cpumask *andmask)
|
||||
{
|
||||
int apicid = es7000_cpu_to_logical_apicid(0);
|
||||
int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
|
||||
cpumask_var_t cpumask;
|
||||
|
||||
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
|
||||
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
|
||||
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
|
||||
.setup_apic_routing = es7000_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = es7000_apicid_to_node,
|
||||
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
|
||||
};
|
||||
|
||||
struct apic __refdata apic_es7000 = {
|
||||
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
|
||||
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
|
||||
.setup_apic_routing = es7000_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = es7000_apicid_to_node,
|
||||
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
|
||||
};

@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NOTIFY_STOP;

@@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
@@ -120,11 +123,14 @@ void arch_disable_smp_support(void)
static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
				      struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
@@ -181,7 +187,7 @@ int __init arch_early_irq_init(void)
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		set_irq_chip_data(i, &cfg[i]);
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
@@ -200,7 +206,7 @@ int __init arch_early_irq_init(void)
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return get_irq_chip_data(irq);
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
@@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	set_irq_chip_data(at, NULL);
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
@@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = get_irq_chip_data(at);
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		set_irq_chip_data(at, cfg);
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
@@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq)
#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) default_ISA_polarity(idx)

static int MPBIOS_polarity(int idx)
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;
@@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx)
	return polarity;
}

static int MPBIOS_trigger(int idx)
static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;
@@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx)
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
@@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu)
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
@@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu)
static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
@@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq)
}
#endif

static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
	else
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);

	if (irq_remapped(get_irq_chip_data(irq))) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
		fasteoi = false;
	}

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		chip = &ir_ioapic_chip;
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}
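
The rewrite above folds two independent choices — which irq_chip and which flow handler — into one final registration call instead of four duplicated ones. A compact standalone sketch of that decision flow (userspace model; the structs and enum values are stand-ins, only the selection logic is shown):

#include <stdbool.h>
#include <stdio.h>

struct chip { const char *name; };
static struct chip ioapic_chip    = { "IO-APIC" };
static struct chip ir_ioapic_chip = { "IR-IO-APIC" };

static void register_intr(bool level, bool remapped)
{
	struct chip *chip = &ioapic_chip;
	bool fasteoi = level;		/* level-triggered -> fasteoi flow */

	if (remapped)
		chip = &ir_ioapic_chip;	/* remapped irqs get the IR chip; the
					 * trigger still decides the handler */
	printf("chip=%s handler=%s\n", chip->name, fasteoi ? "fasteoi" : "edge");
}

int main(void)
{
	register_intr(true, false);	/* chip=IO-APIC handler=fasteoi */
	register_intr(false, true);	/* chip=IR-IO-APIC handler=edge */
	return 0;
}
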

static int setup_ioapic_entry(int apic_id, int irq,
@@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
		return;
	}

	ioapic_register_intr(irq, trigger);
	ioapic_register_intr(irq, cfg, trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

@@ -1385,33 +1373,26 @@ static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static void __init setup_IO_APIC_irqs(void)
static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
{
	int apic_id, pin, idx, irq, notcon = 0;
	int node = cpu_to_node(0);
	struct irq_cfg *cfg;
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mp_ioapics[apic_id].apicid, pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int apic_id)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					    KERN_DEBUG " %d-%d",
					    mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					    mp_ioapics[apic_id].apicid, pin);
		if (io_apic_pin_not_connected(idx, apic_id, pin))
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				    " (apicid-pin) not connected\n");
			notcon = 0;
		}

		irq = pin_2_irq(idx, apic_id, pin);

@@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void)
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
			apic->multi_timer_check(apic_id, irq))
		    apic->multi_timer_check(apic_id, irq))
			continue;

		cfg = alloc_irq_and_cfg_at(irq, node);
		if (!cfg)
			continue;
		set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
				     irq_polarity(idx));

		add_pin_to_irq_node(cfg, node, apic_id, pin);
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
		setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
				 irq_polarity(idx));
		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			    " (apicid-pin) not connected\n");
static void __init setup_IO_APIC_irqs(void)
{
	unsigned int apic_id;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
		__io_apic_setup_irqs(apic_id);
}

/*
@@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void)
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct irq_cfg *cfg;
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
@@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return;
	set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
			     irq_polarity(idx));

	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

	setup_ioapic_irq(apic_id, pin, irq, cfg,
			 irq_trigger(idx), irq_polarity(idx));
	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
@@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
@@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void)
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = get_irq_chip_data(irq);
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
@@ -1916,7 +1886,7 @@ void disable_IO_APIC(void)
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is little complex (need to configure both
	 * IOAPIC RTE aswell as interrupt-remapping table entry).
	 * IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg)

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!cfg)
		return;
@@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { }
static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	move_native_irq(data->irq);
	irq_move_irq(data);
	ack_APIC_irq();
}

@@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data)
	irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		do_unmask_irq = 1;
		mask_ioapic(cfg);
	}
@@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data)
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
			irq_move_masked_irq(data);
		unmask_ioapic(cfg);
	}
}
@@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void)
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		cfg = irq_get_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
@@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				set_irq_chip(irq, &no_irq_chip);
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}
@@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = {
static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

@@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata;
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = get_irq_chip_data(0);
	struct irq_cfg *cfg = irq_get_chip_data(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
@@ -2935,7 +2905,7 @@ void __init setup_IO_APIC(void)
}

/*
 * Called after all the initialization is done. If we didnt find any
 * Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */

@@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node)
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		set_irq_chip_data(irq, cfg);
		irq_set_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		free_irq_at(irq, cfg);
@@ -3085,7 +3055,7 @@ int create_irq(void)

void destroy_irq(unsigned int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
@@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	if (irq_remapped(get_irq_chip_data(irq))) {
	if (irq_remapped(cfg)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;
@@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	struct irq_chip *chip = &msi_chip;
	struct msi_msg msg;
	int ret;

@@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	irq_set_msi_desc(irq, msidesc);
	write_msi_msg(irq, &msg);

	if (irq_remapped(get_irq_chip_data(irq))) {
	if (irq_remapped(irq_get_chip_data(irq))) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
		chip = &msi_ir_chip;
	}

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

@@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq)
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif
@@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = {

int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_chip *chip = &hpet_msi_type;
	struct msi_msg msg;
	int ret;

@@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
	if (ret < 0)
		return ret;

	hpet_msi_write(get_irq_data(irq), &msg);
	hpet_msi_write(irq_get_handler_data(irq), &msg);
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	if (irq_remapped(get_irq_chip_data(irq)))
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");
	if (irq_remapped(irq_get_chip_data(irq)))
		chip = &ir_hpet_msi_type;

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
	return 0;
}
#endif
@@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)

	write_ht_irq_msg(irq, &msg);

	set_irq_chip_and_handler_name(irq, &ht_irq_chip,
	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
				      handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
@@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
}
#endif /* CONFIG_HT_IRQ */

int __init io_apic_get_redir_entries (int ioapic)
int
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
{
	struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
	int ret;

	if (!cfg)
		return -EINVAL;
	ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
	if (!ret)
		setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg,
				 attr->trigger, attr->polarity);
	return ret;
}

static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
				      struct io_apic_irq_attr *attr)
{
	unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
	int ret;

	/* Avoid redundant programming */
	if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[id].apicid, pin);
		return 0;
	}
	ret = io_apic_setup_irq_pin(irq, node, attr);
	if (!ret)
		set_bit(pin, mp_ioapic_routing[id].pin_programmed);
	return ret;
}
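
The "program each pin only once" pattern that io_apic_setup_irq_pin_once() centralizes above boils down to a test-and-set on a per-IOAPIC bitmap. A minimal standalone sketch (userspace model; the bitmap width and pin numbers are illustrative, not the kernel's structures):

#include <stdio.h>

#define MAX_PINS 24
static unsigned long pin_programmed;	/* one bit per ioapic pin */

static int setup_pin_once(int pin)
{
	if (pin_programmed & (1UL << pin)) {
		printf("pin %d already programmed, skipping\n", pin);
		return 0;
	}
	/* ... program the redirection entry here ... */
	pin_programmed |= 1UL << pin;
	printf("pin %d programmed\n", pin);
	return 0;
}

int main(void)
{
	setup_pin_once(9);
	setup_pin_once(9);	/* second call is a no-op */
	return 0;
}

The point of centralizing the test-and-set is that ACPI PRT entries routinely map several devices to the same pin; only the first caller should touch the redirection entry.
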

static int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
@@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void)
}
#endif

static int __io_apic_set_pci_routing(struct device *dev, int irq,
				     struct io_apic_irq_attr *irq_attr)
int io_apic_set_pci_routing(struct device *dev, int irq,
			    struct io_apic_irq_attr *irq_attr)
{
	struct irq_cfg *cfg;
	int node;
	int ioapic, pin;
	int trigger, polarity;

	ioapic = irq_attr->ioapic;
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			    ioapic);
			    irq_attr->ioapic);
		return -EINVAL;
	}

	if (dev)
		node = dev_to_node(dev);
	else
		node = cpu_to_node(0);
	node = dev ? dev_to_node(dev) : cpu_to_node(0);

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return 0;

	pin = irq_attr->ioapic_pin;
	trigger = irq_attr->trigger;
	polarity = irq_attr->polarity;

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= legacy_pic->nr_legacy_irqs) {
		if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
			printk(KERN_INFO "can not add pin %d for irq %d\n",
			       pin, irq);
			return 0;
		}
	}

	setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);

	return 0;
}

int io_apic_set_pci_routing(struct device *dev, int irq,
			    struct io_apic_irq_attr *irq_attr)
{
	int ioapic, pin;
	/*
	 * Avoid pin reprogramming. PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}

u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
	return io_apic_setup_irq_pin_once(irq, node, irq_attr);
}

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
static int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)

	return apic_id;
}

static u8 __init io_apic_unique_id(u8 id)
{
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
}
#else
static u8 __init io_apic_unique_id(u8 id)
{
	int i;
	DECLARE_BITMAP(used, 256);

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
}
#endif
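
The 64-bit io_apic_unique_id() variant above keeps a requested APIC ID if it is still free and otherwise hands out the first unused one. A self-contained model of that logic (the used[] array stands in for the kernel's bitmap helpers; values are made up):

#include <stdbool.h>
#include <stdio.h>

static bool used[256];

static unsigned char unique_id(unsigned char id)
{
	unsigned int i;

	if (!used[id])
		return id;		/* requested id is still free */
	for (i = 0; i < 256; i++)	/* else the first free slot wins */
		if (!used[i])
			return (unsigned char)i;
	return id;			/* all 256 taken; degenerate case */
}

int main(void)
{
	used[0] = used[1] = true;	/* ids claimed by earlier IO-APICs */
	printf("want 1 -> get %u\n", (unsigned)unique_id(1));	/* collides -> 2 */
	printf("want 5 -> get %u\n", (unsigned)unique_id(5));	/* free -> 5 */
	return 0;
}
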

int __init io_apic_get_version(int ioapic)
static int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
@@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	const struct cpumask *mask;
	struct irq_data *idata;

	if (skip_ioapic_setup == 1)
		return;
@@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void)
		if ((ioapic > 0) && (irq > 16))
			continue;

		desc = irq_to_desc(irq);
		idata = irq_get_irq_data(irq);

		/*
		 * Honour affinities which have been set in early boot
		 */
		if (desc->status &
		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
			mask = desc->irq_data.affinity;
		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
			mask = idata->affinity;
		else
			mask = apic->target_cpus();

		if (intr_remapping_enabled)
			ir_ioapic_set_affinity(&desc->irq_data, mask, false);
			ir_ioapic_set_affinity(idata, mask, false);
		else
			ioapic_set_affinity(&desc->irq_data, mask, false);
			ioapic_set_affinity(idata, mask, false);
	}

}
@@ -4026,10 +3980,10 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
	return gsi - mp_gsi_routing[ioapic].gsi_base;
}

static int bad_ioapic(unsigned long address)
static __init int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
		return 1;
	}
@@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
	struct irq_cfg *cfg;
	struct io_apic_irq_attr attr = { 0, 0, 0, 0 };

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	physid_set_mask_of_physid(boot_cpu_physical_apicid,
				  &phys_cpu_present_map);
#endif
	/* Make sure the irq descriptor is set up */
	cfg = alloc_irq_and_cfg_at(0, 0);

	setup_local_APIC();

	add_pin_to_irq_node(cfg, 0, 0, 0);
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
	io_apic_setup_irq_pin(0, 0, &attr);
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");
}

@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			apic->cpu_to_logical_apicid(query_cpu), vector,
			apic->dest_logical);
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			apic->cpu_to_logical_apicid(query_cpu), vector,
			apic->dest_logical);
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32

/*
 * This is only used on smaller machines.
 */

@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask
	return physids_promote(0xFUL, retmap);
}

static inline int numaq_cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_2_logical_apicid[cpu];
}

/*
 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
 * cpu to APIC ID relation to properly interact with the intelligent
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
	return logical_apicid >> 4;
}

static int numaq_numa_cpu_node(int cpu)
{
	int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

	if (logical_apicid != BAD_APICID)
		return numaq_apicid_to_node(logical_apicid);
	return NUMA_NO_NODE;
}

static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
{
	int node = numaq_apicid_to_node(logical_apicid);
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
	.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
	.setup_apic_routing = numaq_setup_apic_routing,
	.multi_timer_check = numaq_multi_timer_check,
	.apicid_to_node = numaq_apicid_to_node,
	.cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
	.cpu_present_to_apicid = numaq_cpu_present_to_apicid,
	.apicid_to_cpu_present = numaq_apicid_to_cpu_present,
	.setup_portio_remap = numaq_setup_portio_remap,
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
	.x86_32_numa_cpu_node = numaq_numa_cpu_node,
};
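
The new numaq_numa_cpu_node() path above reduces to a shift on the early per-cpu logical apicid — on NUMA-Q the quad (node) number lives above the low nibble. A tiny standalone model (the apicid table is made up for illustration):

#include <stdio.h>

#define BAD_APICID   0xFFu
#define NUMA_NO_NODE (-1)

static unsigned int logical_apicid[4] = { 0x01, 0x02, 0x11, BAD_APICID };

static int numa_cpu_node(int cpu)
{
	unsigned int id = logical_apicid[cpu];

	if (id != BAD_APICID)
		return (int)(id >> 4);	/* quad number is in bits 4 and up */
	return NUMA_NO_NODE;
}

int main(void)
{
	printf("cpu 2 -> node %d\n", numa_cpu_node(2));	/* 0x11 >> 4 == 1 */
	printf("cpu 3 -> node %d\n", numa_cpu_node(3));	/* unset -> -1 */
	return 0;
}
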

@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
	apic->setup_apic_routing();
}

static int default_x86_32_early_logical_apicid(int cpu)
{
	return 1 << cpu;
}

static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
@@ -130,8 +135,6 @@ struct apic apic_default = {
	.ioapic_phys_id_map = default_ioapic_phys_id_map,
	.setup_apic_routing = setup_apic_flat_routing,
	.multi_timer_check = NULL,
	.apicid_to_node = default_apicid_to_node,
	.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = physid_set_mask_of_physid,
	.setup_portio_remap = NULL,
@@ -167,6 +170,9 @@ struct apic apic_default = {
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = default_x86_32_early_logical_apicid,
	.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};

extern struct apic apic_numaq;

@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
	return 1;
}

static void summit_init_apic_ldr(void)
static int summit_early_logical_apicid(int cpu)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
	u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
	u8 lid;
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void)

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void)
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	return my_cluster | (1UL << count);
}

static void summit_init_apic_ldr(void)
{
	int cpu = smp_processor_id();
	unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
	unsigned long val;

	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
		nr_ioapics);
}

static int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}

/* Mapping from cpu number to logical apicid */
static inline int summit_cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_2_logical_apicid[cpu];
#else
	return logical_smp_processor_id();
#endif
}

static int summit_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
	 * The cpus in the mask must all be on the apic cluster.
	 */
	for_each_cpu(cpu, cpumask) {
		int new_apicid = summit_cpu_to_logical_apicid(cpu);
		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			printk("%s: Not a valid mask!\n", __func__);
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = summit_cpu_to_logical_apicid(0);
	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -528,8 +514,6 @@ struct apic apic_summit = {
	.ioapic_phys_id_map = summit_ioapic_phys_id_map,
	.setup_apic_routing = summit_setup_apic_routing,
	.multi_timer_check = NULL,
	.apicid_to_node = summit_apicid_to_node,
	.cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
	.cpu_present_to_apicid = summit_cpu_present_to_apicid,
	.apicid_to_cpu_present = summit_apicid_to_cpu_present,
	.setup_portio_remap = NULL,
@@ -565,4 +549,7 @@ struct apic apic_summit = {
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = summit_early_logical_apicid,
	.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
};
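
The summit_early_logical_apicid() helper extracted above assigns each cpu one bit within its cluster's 4-wide logical map, chosen by counting the ids already handed out in that cluster. A compact standalone model of that counting scheme (values are illustrative):

#include <stdio.h>

#define NR_CPUS     4
#define BAD_APICID  0xFFu
#define CLUSTER(id) ((id) & 0xF0u)	/* high nibble selects the cluster */

static unsigned int logical_id[NR_CPUS] = { 0x11, 0x12, BAD_APICID, BAD_APICID };

static unsigned int early_logical_apicid(unsigned int phys_id)
{
	unsigned int cluster = CLUSTER(phys_id);
	int i, count = 0;

	for (i = 0; i < NR_CPUS; i++)	/* cpus already in this cluster */
		if (logical_id[i] != BAD_APICID && CLUSTER(logical_id[i]) == cluster)
			count++;
	return cluster | (1u << count);	/* next free bit in the 4-wide map */
}

int main(void)
{
	/* two ids exist in cluster 0x10, so the next cpu gets bit 2 */
	printf("phys 0x13 -> logical 0x%x\n", early_logical_apicid(0x13));
	return 0;
}
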

@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,

@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,

@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,

@@ -66,7 +66,7 @@
 * 1.5: Fix segment register reloading (in case of bad segments saved
 *      across BIOS call).
 *      Stephen Rothwell
 * 1.6: Cope with complier/assembler differences.
 * 1.6: Cope with compiler/assembler differences.
 *      Only try to turn off the first display device.
 *      Fix OOPS at power off with no APM BIOS by Jan Echternach
 *      <echter@informatik.uni-rostock.de>
@@ -227,6 +227,7 @@
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/acpi.h>

#include <asm/system.h>
#include <asm/uaccess.h>
@@ -975,20 +976,10 @@ recalc:

static void apm_power_off(void)
{
	unsigned char po_bios_call[] = {
		0xb8, 0x00, 0x10,	/* movw  $0x1000,ax  */
		0x8e, 0xd0,		/* movw  ax,ss       */
		0xbc, 0x00, 0xf0,	/* movw  $0xf000,sp  */
		0xb8, 0x07, 0x53,	/* movw  $0x5307,ax  */
		0xbb, 0x01, 0x00,	/* movw  $0x0001,bx  */
		0xb9, 0x03, 0x00,	/* movw  $0x0003,cx  */
		0xcd, 0x15		/* int   $0x15       */
	};

	/* Some bioses don't like being called from CPU != 0 */
	if (apm_info.realmode_power_off) {
		set_cpus_allowed_ptr(current, cpumask_of(0));
		machine_real_restart(po_bios_call, sizeof(po_bios_call));
		machine_real_restart(MRR_APM);
	} else {
		(void)set_system_power_state(APM_STATE_OFF);
	}
@@ -2331,12 +2322,11 @@ static int __init apm_init(void)
		apm_info.disabled = 1;
		return -ENODEV;
	}
	if (pm_flags & PM_ACPI) {
	if (!acpi_disabled) {
		printk(KERN_NOTICE "apm: overridden by ACPI.\n");
		apm_info.disabled = 1;
		return -ENODEV;
	}
	pm_flags |= PM_APM;

	/*
	 * Set up the long jump entry point to the APM BIOS, which is called
@@ -2428,7 +2418,6 @@ static void __exit apm_exit(void)
		kthread_stop(kapmd_task);
		kapmd_task = NULL;
	}
	pm_flags &= ~PM_APM;
}

module_init(apm_init);

@@ -1,5 +1,70 @@
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed to extract
 * and format the required data.
 */
#define COMPILE_OFFSETS

#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/hardirq.h>
#include <linux/suspend.h>
#include <linux/kbuild.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/sigframe.h>
#include <asm/bootparam.h>
#include <asm/suspend.h>

#ifdef CONFIG_XEN
#include <xen/interface/xen.h>
#endif

#ifdef CONFIG_X86_32
# include "asm-offsets_32.c"
#else
# include "asm-offsets_64.c"
#endif

void common(void) {
	BLANK();
	OFFSET(TI_flags, thread_info, flags);
	OFFSET(TI_status, thread_info, status);
	OFFSET(TI_addr_limit, thread_info, addr_limit);
	OFFSET(TI_preempt_count, thread_info, preempt_count);

	BLANK();
	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

	BLANK();
	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif

#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif

	BLANK();
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
}
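
The common() function above relies on kbuild's OFFSET() macro, which turns offsetof() results into constants that assembly code can use. A standalone sketch of the same idea (userspace model: printf stands in for the 'asm volatile' markers kbuild emits, and the struct is a placeholder, not the kernel's thread_info):

#include <stddef.h>
#include <stdio.h>

struct thread_info_model {
	unsigned long flags;
	unsigned long status;
	unsigned long addr_limit;
};

/* kbuild emits these as asm markers that a sed step extracts; printing
 * the same "#define symbol offset" pairs is enough to show the mechanism */
#define OFFSET(sym, str, mem) \
	printf("#define %s %zu\n", #sym, offsetof(struct str, mem))

int main(void)
{
	OFFSET(TI_flags, thread_info_model, flags);
	OFFSET(TI_status, thread_info_model, status);
	OFFSET(TI_addr_limit, thread_info_model, addr_limit);
	return 0;
}
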

@@ -1,26 +1,4 @@
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */

#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/kbuild.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/bootparam.h>
#include <asm/elf.h>
#include <asm/suspend.h>

#include <xen/interface/xen.h>

#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
@@ -51,21 +29,10 @@ void foo(void)
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(TI_task, thread_info, task);
	OFFSET(TI_exec_domain, thread_info, exec_domain);
	OFFSET(TI_flags, thread_info, flags);
	OFFSET(TI_status, thread_info, status);
	OFFSET(TI_preempt_count, thread_info, preempt_count);
	OFFSET(TI_addr_limit, thread_info, addr_limit);
	OFFSET(TI_restart_block, thread_info, restart_block);
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	OFFSET(TI_cpu, thread_info, cpu);
	BLANK();

	OFFSET(GDS_size, desc_ptr, size);
	OFFSET(GDS_address, desc_ptr, address);
	BLANK();

	OFFSET(PT_EBX, pt_regs, bx);
	OFFSET(PT_ECX, pt_regs, cx);
	OFFSET(PT_EDX, pt_regs, dx);
@@ -85,42 +52,13 @@ void foo(void)
	OFFSET(PT_OLDSS,  pt_regs, ss);
	BLANK();

	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
	BLANK();

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

	/* Offset from the sysenter stack to tss.sp0 */
	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
		 sizeof(struct tss_struct));

	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
	DEFINE(THREAD_SIZE_asm, THREAD_SIZE);

	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
#endif

#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif

#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
	BLANK();
	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
@@ -139,11 +77,4 @@ void foo(void)
	OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
	OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif

	BLANK();
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
}

@@ -1,27 +1,4 @@
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed to extract
 * and format the required data.
 */
#define COMPILE_OFFSETS

#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/suspend.h>
#include <linux/kbuild.h>
#include <asm/processor.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/ia32.h>
#include <asm/bootparam.h>
#include <asm/suspend.h>

#include <xen/interface/xen.h>

#include <asm/sigframe.h>

#define __NO_STUBS 1
#undef __SYSCALL
@@ -33,41 +10,19 @@ static char syscalls[] = {

int main(void)
{
#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
	ENTRY(state);
	ENTRY(flags);
	ENTRY(pid);
	BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
	ENTRY(flags);
	ENTRY(addr_limit);
	ENTRY(preempt_count);
	ENTRY(status);
#ifdef CONFIG_IA32_EMULATION
	ENTRY(sysenter_return);
#endif
	BLANK();
#undef ENTRY
#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
	BLANK();
#endif

#ifdef CONFIG_IA32_EMULATION
#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	BLANK();

#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
	ENTRY(ax);
	ENTRY(bx);
	ENTRY(cx);
@@ -79,15 +34,12 @@ int main(void)
	ENTRY(ip);
	BLANK();
#undef ENTRY
	DEFINE(IA32_RT_SIGFRAME_sigcontext,
	       offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));

	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
	BLANK();
#endif
	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));
	BLANK();
#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))

#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
	ENTRY(bx);
	ENTRY(bx);
	ENTRY(cx);
@@ -107,7 +59,8 @@ int main(void)
	ENTRY(flags);
	BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))

#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
	ENTRY(cr0);
	ENTRY(cr2);
	ENTRY(cr3);
@@ -115,26 +68,11 @@ int main(void)
	ENTRY(cr8);
	BLANK();
#undef ENTRY
	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
	BLANK();
	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));

	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
	BLANK();

	DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);

	BLANK();
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);

	BLANK();
	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#undef ENTRY
#endif
	return 0;
}
|
||||
|
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
#ifdef CONFIG_NUMA
|
||||
/*
|
||||
* To workaround broken NUMA config. Read the comment in
|
||||
* srat_detect_node().
|
||||
*/
|
||||
static int __cpuinit nearby_node(int apicid)
|
||||
{
|
||||
int i, node;
|
||||
|
||||
for (i = apicid - 1; i >= 0; i--) {
|
||||
node = apicid_to_node[i];
|
||||
node = __apicid_to_node[i];
|
||||
if (node != NUMA_NO_NODE && node_online(node))
|
||||
return node;
|
||||
}
|
||||
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
|
||||
node = apicid_to_node[i];
|
||||
node = __apicid_to_node[i];
|
||||
if (node != NUMA_NO_NODE && node_online(node))
|
||||
return node;
|
||||
}
|
||||
@@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid)
|
||||
#ifdef CONFIG_X86_HT
|
||||
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u32 nodes;
|
||||
u32 nodes, cores_per_cu = 1;
|
||||
u8 node_id;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
@@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
|
||||
/* get compute unit information */
|
||||
smp_num_siblings = ((ebx >> 8) & 3) + 1;
|
||||
c->compute_unit_id = ebx & 0xff;
|
||||
cores_per_cu += ((ebx >> 8) & 3);
|
||||
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
|
||||
u64 value;
|
||||
|
||||
@@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
|
||||
/* fixup multi-node processor information */
|
||||
if (nodes > 1) {
|
||||
u32 cores_per_node;
|
||||
u32 cus_per_node;
|
||||
|
||||
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
|
||||
cores_per_node = c->x86_max_cores / nodes;
|
||||
cus_per_node = cores_per_node / cores_per_cu;
|
||||
|
||||
/* store NodeID, use llc_shared_map to store sibling info */
|
||||
per_cpu(cpu_llc_id, cpu) = node_id;
|
||||
|
||||
/* core id to be in range from 0 to (cores_per_node - 1) */
|
||||
c->cpu_core_id = c->cpu_core_id % cores_per_node;
|
||||
/* core id has to be in the [0 .. cores_per_node - 1] range */
|
||||
c->cpu_core_id %= cores_per_node;
|
||||
c->compute_unit_id %= cus_per_node;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);

 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
 	int node;
 	unsigned apicid = c->apicid;

-	node = per_cpu(cpu_llc_id, cpu);
+	node = numa_cpu_node(cpu);
+	if (node == NUMA_NO_NODE)
+		node = per_cpu(cpu_llc_id, cpu);

-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
 	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case. */
+		/*
+		 * Two possibilities here:
+		 *
+		 * - The CPU is missing memory and no node was created.  In
+		 *   that case try picking one from a nearby CPU.
+		 *
+		 * - The APIC IDs differ from the HyperTransport node IDs
+		 *   which the K8 northbridge parsing fills in.  Assume
+		 *   they are all increased by a constant offset, but in
+		 *   the same order as the HT nodeids.  If that doesn't
+		 *   result in a usable node fall back to the path for the
+		 *   previous case.
+		 *
+		 * This workaround operates directly on the mapping between
+		 * APIC ID and NUMA node, assuming certain relationship
+		 * between APIC ID, HT node ID and NUMA topology.  As going
+		 * through CPU mapping may alter the outcome, directly
+		 * access __apicid_to_node[].
+		 */
 		int ht_nodeid = c->initial_apicid;

 		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
+		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = __apicid_to_node[ht_nodeid];
 		/* Pick a nearby node */
 		if (!node_online(node))
 			node = nearby_node(apicid);
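The rewritten lookup walks through progressively more generic sources until it lands on an online node. The sketch below condenses that fallback order into a standalone toy program; the table contents and helpers are invented stand-ins for the kernel's numa_cpu_node(), per_cpu(cpu_llc_id, ...), __apicid_to_node[] and nearby_node().

    #include <stdio.h>

    #define NUMA_NO_NODE (-1)
    #define MAX_APICID   16

    /* Invented toy state standing in for kernel tables. */
    static int apicid_to_node_tbl[MAX_APICID];      /* plays __apicid_to_node[] */
    static int node_online_map[4] = { 1, 0, 1, 0 }; /* which nodes are online */

    static int node_online(int n)
    {
        return n >= 0 && n < 4 && node_online_map[n];
    }

    /* Fallback order mirrors the patched srat_detect_node(). */
    static int pick_node(int srat_node, int llc_id, int ht_nodeid)
    {
        int node = srat_node;                         /* 1. numa_cpu_node(cpu) */

        if (node == NUMA_NO_NODE)
            node = llc_id;                            /* 2. per_cpu(cpu_llc_id) */
        if (!node_online(node) && ht_nodeid >= 0 &&
            apicid_to_node_tbl[ht_nodeid] != NUMA_NO_NODE)
            node = apicid_to_node_tbl[ht_nodeid];     /* 3. __apicid_to_node[] */
        /* 4. the kernel would fall back to nearby_node(apicid) here */
        return node;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < MAX_APICID; i++)
            apicid_to_node_tbl[i] = NUMA_NO_NODE;
        apicid_to_node_tbl[3] = 2;

        printf("chosen node: %d\n", pick_node(NUMA_NO_NODE, 1, 3)); /* -> 2 */
        return 0;
    }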
@@ -594,6 +611,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 #endif
+
+	/* As a rule processors have APIC timer running in deep C states */
+	if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+		set_cpu_cap(c, X86_FEATURE_ARAT);
 }

 #ifdef CONFIG_X86_32
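X86_FEATURE_ARAT ("always running APIC timer") tells the clockevents code that the local APIC timer keeps ticking in deep C-states, so no broadcast fallback is needed. The gate added above is just a family floor plus an erratum exclusion; a hedged sketch of that predicate, with the erratum check stubbed out (the stub's family test is invented, not the real erratum 400 condition):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub for cpu_has_amd_erratum(amd_erratum_400); the family test
     * here is invented for the example. */
    static bool has_erratum_400(unsigned family)
    {
        return family == 0x10;
    }

    /* Same shape as the gate added to init_amd(). */
    static bool apic_timer_always_runs(unsigned family)
    {
        return family >= 0xf && !has_erratum_400(family);
    }

    int main(void)
    {
        printf("family 0x6: %d\n", apic_timer_always_runs(0x6)); /* 0 */
        printf("family 0xf: %d\n", apic_timer_always_runs(0xf)); /* 1 */
        return 0;
    }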
@@ -675,7 +675,7 @@ void __init early_cpu_init(void)
 	const struct cpu_dev *const *cdev;
 	int count = 0;

-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
 	printk(KERN_INFO "KERNEL supported cpus:\n");
 #endif

@@ -687,7 +687,7 @@ void __init early_cpu_init(void)
 		cpu_devs[count] = cpudev;
 		count++;

-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
 		{
 			unsigned int j;

@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)

 	select_idle_routine(c);

-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
 }
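Both early_cpu_init() hunks fix the same latent bug: Kconfig emits its symbols with a CONFIG_ prefix, so "#ifdef PROCESSOR_SELECT" could never be true and the guarded printout was silently dead code. A minimal demonstration of the pattern (the FOO symbol is invented):

    #include <stdio.h>

    /* Kconfig turns 'config FOO' into CONFIG_FOO in the generated
     * autoconf header; a plain FOO macro is never defined. */
    #define CONFIG_FOO 1

    int main(void)
    {
    #ifdef FOO                 /* wrong: never compiled in */
        printf("never printed\n");
    #endif
    #ifdef CONFIG_FOO          /* right */
        printf("FOO is enabled\n");
    #endif
        return 0;
    }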
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
 		return -EINVAL;
 	}
 	/* Get max multiplier - as we always did.
-	 * Longhaul MSR is usefull only when voltage scaling is enabled.
+	 * Longhaul MSR is useful only when voltage scaling is enabled.
 	 * C3 is booting at max anyway. */
 	maxmult = mult;
 	/* Get min multiplier */

@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
  * trigger frequency transition in some cases. */
 module_param(disable_acpi_c3, int, 0644);
 MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
  * power, but most VIA C3 processors aren't supporting it. */
 module_param(scale_voltage, int, 0644);
 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
@@ -313,8 +313,6 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
 	u32 supported;
 	int ret = 0;

-	input.count = 4;
-	input.pointer = in_params;
 	input.count = 4;
 	input.pointer = in_params;
 	in_params[0].type = ACPI_TYPE_BUFFER;
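The two deleted lines were a verbatim duplicate of the initialization that immediately follows. For context, this is the usual shape of argument marshalling for an ACPI method call: an object list pointing at an array of ACPI objects. The sketch below mirrors that shape with simplified stand-in types (not the real ACPICA definitions) so it compiles in userspace.

    #include <stdio.h>

    /* Simplified stand-ins for the ACPICA types used in the hunk. */
    #define ACPI_TYPE_BUFFER 3

    union acpi_object_sketch {
        int type;
        struct {
            int type;
            unsigned length;
            unsigned char *pointer;
        } buffer;
    };

    struct acpi_object_list_sketch {
        unsigned count;
        union acpi_object_sketch *pointer;
    };

    int main(void)
    {
        union acpi_object_sketch in_params[4] = { { 0 } };
        unsigned char uuid[16] = { 0 };   /* placeholder UUID bytes */
        struct acpi_object_list_sketch input;

        /* One initialization is enough - the deleted lines repeated it. */
        input.count   = 4;
        input.pointer = in_params;
        in_params[0].type           = ACPI_TYPE_BUFFER;
        in_params[0].buffer.length  = sizeof(uuid);
        in_params[0].buffer.pointer = uuid;

        printf("method takes %u args\n", input.count);
        return 0;
    }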
@@ -630,8 +630,7 @@ static void print_basics(struct powernow_k8_data *data)
 				data->powernow_table[j].frequency/1000);
 		} else {
 			printk(KERN_INFO PFX
-				"   %d : fid 0x%x (%d MHz), vid 0x%x\n",
-				j,
+				"fid 0x%x (%d MHz), vid 0x%x\n",
 				data->powernow_table[j].index & 0xff,
 				data->powernow_table[j].frequency/1000,
 				data->powernow_table[j].index >> 8);
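The reworked printk also shows how the table entry packs its fields: the low byte of index holds the fid and the next byte the vid, which is why the arguments are "index & 0xff" and "index >> 8". A tiny sketch with an invented value:

    #include <stdio.h>

    int main(void)
    {
        /* powernow_table[].index packs fid in bits 7:0, vid in bits 15:8;
         * the value below is invented for illustration. */
        unsigned index = (0x12 << 8) | 0x0e;

        printf("fid 0x%x, vid 0x%x\n", index & 0xff, index >> 8);
        return 0;
    }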
@@ -1276,7 +1275,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
-		 * Use the PSB BIOS structure. This is only availabe on
+		 * Use the PSB BIOS structure. This is only available on
 		 * an UP version, and is deprecated by AMD.
 		 */
 		if (num_online_cpus() != 1) {
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

 	result = speedstep_smi_ownership();
 	if (result) {
-		dprintk("fails in aquiring ownership of a SMI interface.\n");
+		dprintk("fails in acquiring ownership of a SMI interface.\n");
 		return -EINVAL;
 	}

@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 	int result = speedstep_smi_ownership();

 	if (result)
-		dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+		dprintk("fails in re-acquiring ownership of a SMI interface.\n");

 	return result;
 }
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)

 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	unsigned node;
 	int cpu = smp_processor_id();
-	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

-	/* Don't do the funky fallback heuristics the AMD version employs
-	   for now. */
-	node = apicid_to_node[apicid];
+	node = numa_cpu_node(cpu);
 	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);