Merge branch 'linus' into core/iommu
@@ -973,6 +973,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 	nr_ioapics++;
 }
 
+int __init acpi_probe_gsi(void)
+{
+	int idx;
+	int gsi;
+	int max_gsi = 0;
+
+	if (acpi_disabled)
+		return 0;
+
+	if (!acpi_ioapic)
+		return 0;
+
+	max_gsi = 0;
+	for (idx = 0; idx < nr_ioapics; idx++) {
+		gsi = mp_ioapic_routing[idx].gsi_end;
+
+		if (gsi > max_gsi)
+			max_gsi = gsi;
+	}
+
+	return max_gsi + 1;
+}
+
 static void assign_to_mp_irq(struct mp_config_intsrc *m,
 			     struct mp_config_intsrc *mp_irq)
 {
@@ -156,11 +156,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
 			acpi_no_s4_hw_signature();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
-		if (strncmp(str, "s4_nonvs", 8) == 0)
-			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
@@ -13,7 +13,6 @@
 	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 	 */
 ENTRY(wakeup_long64)
-wakeup_long64:
 	movq	saved_magic, %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
@@ -34,16 +33,12 @@ wakeup_long64:
 
 	movq	saved_rip, %rax
 	jmp	*%rax
+ENDPROC(wakeup_long64)
 
 bogus_64_magic:
 	jmp	bogus_64_magic
 
-	.align 2
-	.p2align 4,,15
-.globl do_suspend_lowlevel
-	.type	do_suspend_lowlevel,@function
-do_suspend_lowlevel:
-.LFB5:
+ENTRY(do_suspend_lowlevel)
 	subq	$8, %rsp
 	xorl	%eax, %eax
 	call	save_processor_state
@@ -67,7 +62,7 @@ do_suspend_lowlevel:
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.L97, saved_rip(%rip)
+	movq	$resume_point, saved_rip(%rip)
 
 	movq	%rsp, saved_rsp
 	movq	%rbp, saved_rbp
@@ -78,14 +73,12 @@ do_suspend_lowlevel:
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	jmp	acpi_enter_sleep_state
-.L97:
-	.p2align 4,,7
-.L99:
-	.align 4
-	movl	$24, %eax
-	movw	%ax, %ds
-
+	call	acpi_enter_sleep_state
+	/* in case something went wrong, restore the machine status and go on */
+	jmp	resume_point
+
+	.align 4
+resume_point:
 	/* We don't restore %rax, it must be 0 anyway */
 	movq	$saved_context, %rax
 	movq	saved_context_cr4(%rax), %rbx
@@ -117,12 +110,9 @@ do_suspend_lowlevel:
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	jmp	restore_processor_state
-.LFE5:
-.Lfe5:
-	.size	do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
-
+ENDPROC(do_suspend_lowlevel)
 	.data
 	ALIGN
 ENTRY(saved_rbp)	.quad	0
 ENTRY(saved_rsi)	.quad	0
 ENTRY(saved_rdi)	.quad	0
@@ -862,7 +862,7 @@ void clear_local_APIC(void)
 	}
 
 	/* lets not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
 	if (maxlvt >= 5) {
 		v = apic_read(APIC_LVTTHMR);
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
 	unsigned int value;
 
+	/* APIC hasn't been mapped yet */
+	if (!apic_phys)
+		return;
+
 	clear_local_APIC();
 
 	/*
@@ -1432,7 +1436,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:
@@ -1833,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	num_processors++;
 	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
+	if (version != apic_version[boot_cpu_physical_apicid])
+		WARN_ONCE(1,
+			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+			apic_version[boot_cpu_physical_apicid], cpu, version);
+
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
 		/*
@@ -1192,6 +1192,7 @@ static int suspend(int vetoable)
 	device_suspend(PMSG_SUSPEND);
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 
 	local_irq_enable();
 
@@ -1208,6 +1209,7 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 	device_resume(PMSG_RESUME);
@@ -1228,6 +1230,7 @@ static void standby(void)
 
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1235,6 +1238,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-	bool "/proc/acpi/processor/../performance interface (deprecated)"
-	depends on PROC_FS
-	depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-	help
-	  This enables the deprecated /proc/acpi/processor/../performance
-	  interface. While it is helpful for debugging, the generic,
-	  cross-architecture cpufreq interfaces should be used.
-
-	  If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
 	tristate
 	default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
@@ -145,13 +145,14 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_var_t mask;
+	const struct cpumask *mask;
 	drv_addr_union addr;
 	u32 val;
 };
 
-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 h;
 
 	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
 
 	switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, cmd->mask);
-	do_drv_read(cmd);
-	set_cpus_allowed_ptr(current, &saved_mask);
+	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
 	for_each_cpu(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, cpumask_of(i));
-		do_drv_write(cmd);
+		work_on_cpu(i, do_drv_write, cmd);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return;
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -235,8 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	}
 
-	cpumask_copy(cmd.mask, mask);
-
+	cmd.mask = mask;
 	drv_read(&cmd);
 
 	dprintk("get_cur_val = %u\n", cmd.val);
@@ -368,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	return freq;
 }
 
-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -403,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
-	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
-		return -ENOMEM;
-
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
@@ -450,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+		cmd.mask = policy->cpus;
 	else
-		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+		cmd.mask = cpumask_of(policy->cpu);
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
@@ -479,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	perf->state = next_perf_state;
 
 out:
-	free_cpumask_var(cmd.mask);
 	return result;
 }
 
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1142,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	rc = powernow_k8_cpu_init_acpi(data);
-	if (rc) {
+	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1161,19 +1175,28 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 			"ACPI maintainers and complain to your BIOS "
 			"vendor.\n");
 #endif
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 			       "CPU other than CPU0. Complain to your BIOS "
 			       "vendor.\n");
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			 ((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1227,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-		+ (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
@@ -29,6 +29,19 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+	/* Unmask CPUID levels if masked: */
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+			c->cpuid_level = cpuid_eax(0);
+		}
+	}
+
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
@@ -278,6 +291,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
 #ifdef CONFIG_X86_64
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2038 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	 * If we know that the error was in user space, send a
 	 * SIGBUS. Otherwise, panic if tolerance is low.
 	 *
-	 * do_exit() takes an awful lot of locks and has a slight
+	 * force_sig() takes an awful lot of locks and has a slight
 	 * risk of deadlocking.
 	 */
 	if (user_space) {
-		do_exit(SIGBUS);
+		force_sig(SIGBUS, current);
 	} else if (panic_on_oops || tolerant < 2) {
 		mce_panic("Uncorrected machine check",
 			&panicm, mcestart);
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 
 }
 
-static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable);
 static int mce_resume(struct sys_device *dev)
 {
 	mce_init(NULL);
+	mce_cpu_features(&current_cpu_data);
 	return 0;
 }
 
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
-void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
 	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
@@ -30,7 +30,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	irq_exit();
 }
 
-static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int tm2 = 0;
@@ -84,7 +84,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
-void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 }
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+	mtrr_show = 1;
+	return 0;
+}
+early_param("mtrr.show", mtrr_debug);
 
 /*
  * Returns the effective MTRR type for the region
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		WARN(!kvm_para_available(), KERN_WARNING
-				"WARNING: strange, CPU MTRRs all blank?\n");
+		printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
 		return 0;
 	}
 
@@ -15,8 +15,8 @@
  *  - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
 
 
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }
 
 static const struct ds_configuration ds_cfg_netburst = {
-	.name = "netburst",
+	.name = "Netburst",
 	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
 	.ctl[dsf_bts_kernel]	= (1 << 5),
 	.ctl[dsf_bts_user]	= (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-	.name = "pentium m",
+	.name = "Pentium M",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 
 	.sizeof_field		= sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
 	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-	.name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+	.name = "Core 2/Atom",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 	.ctl[dsf_bts_kernel]	= (1 << 9),
 	.ctl[dsf_bts_user]	= (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 	switch (c->x86) {
 	case 0x6:
 		switch (c->x86_model) {
-		case 0 ... 0xC:
-			/* sorry, don't know about them */
-			break;
-		case 0xD:
-		case 0xE: /* Pentium M */
+		case 0x9:
+		case 0xd: /* Pentium M */
 			ds_configure(&ds_cfg_pentium_m);
 			break;
-		default: /* Core2, Atom, ... */
-			ds_configure(&ds_cfg_core2);
+		case 0xf:
+		case 0x17: /* Core2 */
+		case 0x1c: /* Atom */
+			ds_configure(&ds_cfg_core2_atom);
 			break;
+		case 0x1a: /* i7 */
+		default:
+			/* sorry, don't know about them */
+			break;
 		}
 		break;
-	case 0xF:
+	case 0xf:
 		switch (c->x86_model) {
 		case 0x0:
 		case 0x1:
@@ -346,6 +346,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov %gs:pda_irqstackptr,%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
 		"   movl $0, %[faulted]\n"
+		"3:\n"
 
 		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
+		"4: movl $1, %[faulted]\n"
+		"   jmp 3b\n"
 		".previous\n"
 
-		_ASM_EXTABLE(1b, 3b)
-		_ASM_EXTABLE(2b, 3b)
+		_ASM_EXTABLE(1b, 4b)
+		_ASM_EXTABLE(2b, 4b)
 
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
 
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		now = hpet_readl(HPET_COUNTER);
 		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
+		/* Make sure we use edge triggered interrupts */
+		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
@@ -628,11 +630,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
 		wait_for_completion(&work.complete);
+		destroy_timer_on_stack(&work.work.timer);
 		break;
 	case CPU_DEAD:
 		if (hdev) {
@@ -896,13 +899,21 @@ static unsigned long hpet_rtc_flags;
 static int hpet_prev_update_sec;
 static struct rtc_time hpet_alarm_time;
 static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
 static unsigned long hpet_default_delta;
 static unsigned long hpet_pie_delta;
 static unsigned long hpet_pie_limit;
 
 static rtc_irq_handler irq_handler;
 
+/*
+ * Check that the hpet counter c1 is ahead of the c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+	return (s32)(c2 - c1) < 0;
+}
+
 /*
  * Registers a IRQ handler.
  */
@@ -1074,7 +1085,7 @@ static void hpet_rtc_timer_reinit(void)
 			hpet_t1_cmp += delta;
 			hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
 			lost_ints++;
-		} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+		} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
 
 		if (lost_ints) {
 			if (hpet_rtc_flags & RTC_PIE)
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev)
 
 	flags = claim_dma_lock();
 
-	dma_outb(DMA1_RESET_REG, 0);
-	dma_outb(DMA2_RESET_REG, 0);
+	dma_outb(0, DMA1_RESET_REG);
+	dma_outb(0, DMA2_RESET_REG);
 
-	for (i = 0;i < 8;i++) {
+	for (i = 0; i < 8; i++) {
 		set_dma_addr(i, 0x000000);
 		/* DMA count is a bit weird so this is not 0 */
 		set_dma_count(i, 1);
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class i8237_sysdev_class = {
-	.name = "i8237",
-	.suspend = i8237A_suspend,
-	.resume = i8237A_resume,
+	.name		= "i8237",
+	.suspend	= i8237A_suspend,
+	.resume		= i8237A_resume,
 };
 
 static struct sys_device device_i8237A = {
-	.id = 0,
-	.cls = &i8237_sysdev_class,
+	.id	= 0,
+	.cls	= &i8237_sysdev_class,
 };
 
 static int __init i8237A_init_sysfs(void)
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void)
 	error = sysdev_register(&device_i8237A);
 	return error;
 }
-
 device_initcall(i8237A_init_sysfs);
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3840,14 +3841,24 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 void __init probe_nr_irqs_gsi(void)
 {
-	int idx;
 	int nr = 0;
 
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	if (nr > nr_irqs_gsi)
+	nr = acpi_probe_gsi();
+	if (nr > nr_irqs_gsi) {
 		nr_irqs_gsi = nr;
+	} else {
+		/* for acpi=off or acpi is not compiled in */
+		int idx;
+
+		nr = 0;
+		for (idx = 0; idx < nr_ioapics; idx++)
+			nr += io_apic_get_redir_entries(idx) + 1;
+
+		if (nr > nr_irqs_gsi)
+			nr_irqs_gsi = nr;
+	}
 
 	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
 /* --------------------------------------------------------------------------
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
@@ -446,7 +446,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 				       struct kprobe_ctlblk *kcb)
 {
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
 	if (p->ainsn.boostable == 1 && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
 static void __init platform_detect(void)
 {
 	/* stopgap until OFW support is added to the kernel */
-	olpc_platform_info.boardrev = 0xc2;
+	olpc_platform_info.boardrev = olpc_board(0xc2);
 }
 #endif
 
@@ -268,6 +268,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
+void arch_flush_lazy_mmu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_cpu_mode();
+		arch_enter_lazy_cpu_mode();
+	}
+
+	preempt_enable();
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -5,7 +5,7 @@
  * This allows to use PCI devices that only support 32bit addresses on systems
  * with more than 4GB.
  *
- * See Documentation/DMA-mapping.txt for the interface specification.
+ * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
  *
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Subject to the GNU General Public License v2 only.
@@ -180,6 +180,9 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 
 	trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 	if (!need_resched()) {
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
@@ -194,6 +197,9 @@ static void mwait_idle(void)
 	struct power_trace it;
 	if (!need_resched()) {
 		trace_power_start(&it, POWER_CSTATE, 1);
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
@@ -104,9 +104,6 @@ void cpu_idle(void)
 			check_pgt_cache();
 			rmb();
 
-			if (rcu_pending(cpu))
-				rcu_check_callbacks(cpu, 0);
-
 			if (cpu_is_offline(cpu))
 				play_dead();
 
@@ -40,6 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -151,14 +152,18 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned int fsindex, gsindex;
 	unsigned int ds, cs, es;
+	const char *board;
 
 	printk("\n");
 	print_modules();
-	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (!board)
+		board = "";
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version, board);
 	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
 	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace() which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}
@@ -1384,7 +1388,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 #ifdef CONFIG_X86_32
 # define IS_IA32	1
 #elif defined CONFIG_IA32_EMULATION
-# define IS_IA32	test_thread_flag(TIF_IA32)
+# define IS_IA32	is_compat_task()
 #else
 # define IS_IA32	0
 #endif
@@ -607,7 +607,7 @@ struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
 static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE
-		"%s detected: BIOS may corrupt low RAM, working it around.\n",
+		"%s detected: BIOS may corrupt low RAM, working around it.\n",
 		d->ident);
 
 	e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void)
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
 {
 	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 	alloc_bootmem_cpumask_var(&cpu_callin_mask);
@@ -632,9 +632,16 @@ badframe:
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly as with tail-call optimization
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-	return do_rt_sigreturn(&regs);
+	struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+	return do_rt_sigreturn(regs);
 }
 #else /* !CONFIG_X86_32 */
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
@@ -115,7 +115,7 @@ unsigned long __init calibrate_cpu(void)
 
 static struct irqaction irq0 = {
 	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
+	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask		= CPU_MASK_NONE,
 	.name		= "timer"
 };
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 				destination_timeouts = 0;
 			}
 		}
+		cpu_relax();
 	}
 	return FLUSH_COMPLETE;
 }
@@ -99,6 +99,12 @@ static inline void preempt_conditional_sti(struct pt_regs *regs)
 		local_irq_enable();
 }
 
+static inline void conditional_cli(struct pt_regs *regs)
+{
+	if (regs->flags & X86_EFLAGS_IF)
+		local_irq_disable();
+}
+
 static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
@@ -626,8 +632,10 @@ clear_dr7:
 
 #ifdef CONFIG_X86_32
 debug_vm86:
+	/* reenable preemption: handle_vm86_trap() might sleep */
+	dec_preempt_count();
 	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-	preempt_conditional_cli(regs);
+	conditional_cli(regs);
 	return;
 #endif
 
@@ -896,7 +904,7 @@ asmlinkage void math_state_restore(void)
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-asmlinkage void math_emulate(long arg)
+void math_emulate(struct math_emu_info *info)
 {
 	printk(KERN_EMERG
 		"math-emulation not enabled and no coprocessor found.\n");
@@ -906,16 +914,19 @@ asmlinkage void math_emulate(long arg)
 }
 #endif /* CONFIG_MATH_EMULATION */
 
-dotraplinkage void __kprobes
-do_device_not_available(struct pt_regs *regs, long error)
+dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
 {
 #ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
-		conditional_sti(regs);
-		math_emulate(0);
+		struct math_emu_info info = { };
+
+		conditional_sti(&regs);
+
+		info.regs = &regs;
+		math_emulate(&info);
 	} else {
 		math_state_restore(); /* interrupts still off */
-		conditional_sti(regs);
+		conditional_sti(&regs);
 	}
 #else
 	math_state_restore();
@@ -320,6 +320,16 @@ static void vmi_release_pmd(unsigned long pfn)
 	vmi_ops.release_page(pfn, VMI_PAGE_L2);
 }
 
+/*
+ * We use the pgd_free hook for releasing the pgd page:
+ */
+static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;
+
+	vmi_ops.release_page(pfn, VMI_PAGE_L2);
+}
+
 /*
  * Helper macros for MMU update flags.  We can defer updates until a flush
  * or page invalidation only if the update is to the current address space
@@ -762,6 +772,7 @@ static inline int __init activate_vmi(void)
 	if (vmi_ops.release_page) {
 		pv_mmu_ops.release_pte = vmi_release_pte;
 		pv_mmu_ops.release_pmd = vmi_release_pmd;
+		pv_mmu_ops.pgd_free = vmi_pgd_free;
 	}
 
 	/* Set linear is needed in all cases */
@@ -858,7 +869,7 @@ void __init vmi_init(void)
 #endif
 }
 
-void vmi_activate(void)
+void __init vmi_activate(void)
 {
 	unsigned long flags;
 
@@ -202,7 +202,7 @@ static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
 static struct irqaction vmi_clock_action = {
 	.name		= "vmi-timer",
 	.handler	= vmi_timer_interrupt,
-	.flags		= IRQF_DISABLED | IRQF_NOBALANCING,
+	.flags		= IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask		= CPU_MASK_ALL,
 };
 
@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void)
 #endif
 
 /** vmi clocksource */
+static struct clocksource clocksource_vmi;
 
 static cycle_t read_real_cycles(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	return ret >= clocksource_vmi.cycle_last ?
+		ret : clocksource_vmi.cycle_last;
 }
 
 static struct clocksource clocksource_vmi = {