Auto merge with /home/aegl/GIT/linus
@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
union acpi_object *obj;
struct acpi_table_iosapic *iosapic;
unsigned int gsi_base;
int node;
int pxm, node;

/* Only care about objects w/ a method that returns the MADT */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
gsi_base = iosapic->global_irq_base;

acpi_os_free(buffer.pointer);
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;

/*
* OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
* OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
* us which node to associate this with.
*/
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
pxm = acpi_get_pxm(handle);
if (pxm < 0)
return AE_OK;

if (!buffer.length || !buffer.pointer)
return AE_OK;

obj = buffer.pointer;

if (obj->type != ACPI_TYPE_INTEGER ||
obj->integer.value >= MAX_PXM_DOMAINS) {
acpi_os_free(buffer.pointer);
return AE_OK;
}

node = pxm_to_nid_map[obj->integer.value];
acpi_os_free(buffer.pointer);
node = pxm_to_nid_map[pxm];

if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
@@ -777,7 +777,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
END(ia64_ret_from_ia32_execve_syscall)
END(ia64_ret_from_ia32_execve)
// fall through
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel)
@@ -1176,7 +1176,7 @@ ENTRY(notify_resume_user)
;;
(pNonSys) mov out2=0 // out2==0 => not a syscall
.fframe 16
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
.spillsp ar.unat, 16
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
.body
@@ -1202,7 +1202,7 @@ GLOBAL_ENTRY(sys_rt_sigsuspend)
adds out2=8,sp // out2=&sigscratch->ar_pfs
;;
.fframe 16
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
.spillsp ar.unat, 16
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch
.body
@@ -460,9 +460,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
;;

st8 [r2]=r14 // update current->blocked with new mask
cmpxchg4.acq r14=[r9],r18,ar.ccv // current->thread_info->flags <- r18
cmpxchg4.acq r8=[r9],r18,ar.ccv // current->thread_info->flags <- r18
;;
cmp.ne p6,p0=r17,r14 // update failed?
cmp.ne p6,p0=r17,r8 // update failed?
(p6) br.cond.spnt.few 1b // yes -> retry

#ifdef CONFIG_SMP
@@ -1103,8 +1103,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

/*
* ia64_mca_cpe_poll
*
@@ -1122,6 +1120,8 @@ ia64_mca_cpe_poll (unsigned long dummy)
platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

/*
* C portion of the OS INIT handler
*
@@ -1390,8 +1390,7 @@ ia64_mca_init(void)
register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

@@ -1436,6 +1435,7 @@ ia64_mca_late_init(void)

#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
init_timer(&cpe_poll_timer);
cpe_poll_timer.function = ia64_mca_cpe_poll;
@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
spin_unlock(&mca_bh_lock);

/* This process is about to be killed itself */
force_sig(SIGKILL, current);
schedule();
do_exit(SIGKILL);
}

/**
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
psr2->i = 0;

return 1;
}
@@ -10,6 +10,7 @@

#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

GLOBAL_ENTRY(mca_handler_bhhook)
invala // clear RSE ?
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
alloc r16=ar.pfs,0,2,1,0 // make a new frame
;;
mov ar.rsc=0
;;
mov r13=IA64_KR(CURRENT) // current task pointer
;;
adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13
mov r2=r13
;;
ld8 r12=[r12] // stack pointer
addl r22=IA64_RBS_OFFSET,r2
;;
mov ar.bspstore=r22
;;
addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
;;
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
st1 [r2]=r0 // clear current->thread.on_ustack flag
mov loc0=r16
movl loc1=mca_handler_bh // recovery C function
;;
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
mov loc1=rp
;;
br.call.sptk.many rp=b6 // not return ...
ssm psr.i
;;
br.call.sptk.many rp=b6 // does not return ...
;;
mov ar.pfs=loc0
mov rp=loc1
@@ -41,7 +41,7 @@
(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
(pKStk) ld8 r3 = [r3];; \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
@@ -50,7 +50,6 @@
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUStk) mov r18=ar.bsp; \
@@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* XXX Should have an arch-hook for running this after final section
* addresses have been selected...
*/
/* See if gp can cover the entire core module: */
uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
if (mod->core_size >= MAX_LTOFF)
uint64_t gp;
if (mod->core_size > MAX_LTOFF)
/*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module.
*/
gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
gp = mod->core_size - MAX_LTOFF / 2;
else
gp = mod->core_size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
}
@@ -11,7 +11,7 @@
* Version Perfmon-2.x is a rewrite of perfmon-1.x
* by Stephane Eranian, Hewlett Packard Co.
*
* Copyright (C) 1999-2003, 2005 Hewlett Packard Co
* Copyright (C) 1999-2005 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
@@ -497,6 +497,9 @@ typedef struct {
static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_session_t pfm_sessions; /* global sessions information */

static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;

static struct proc_dir_entry *perfmon_dir;
static pfm_uuid_t pfm_null_uuid = {0,};

@@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);


/* forward declaration */
@@ -1265,6 +1269,8 @@ out:
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
@@ -1311,6 +1317,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
is_syswide,
cpu));

/*
* disable default_idle() to go to PAL_HALT
*/
update_pal_halt_status(0);

UNLOCK_PFS(flags);

return 0;
@@ -1318,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
error_conflict:
DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
pfm_sessions.pfs_sys_session[cpu]->pid,
smp_processor_id()));
cpu));
abort:
UNLOCK_PFS(flags);

@@ -1366,6 +1377,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide,
cpu));

/*
* if possible, enable default_idle() to go into PAL_HALT
*/
if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
update_pal_halt_status(1);

UNLOCK_PFS(flags);

return 0;
@@ -4202,7 +4219,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid,
ctx->ctx_state));
return -EINVAL;
return -EBUSY;
}

DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4704,16 +4721,26 @@ recheck:
if (task == current || ctx->ctx_fl_system) return 0;

/*
* if context is UNLOADED we are safe to go
* we are monitoring another thread
*/
if (state == PFM_CTX_UNLOADED) return 0;

/*
* no command can operate on a zombie context
*/
if (state == PFM_CTX_ZOMBIE) {
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
switch(state) {
case PFM_CTX_UNLOADED:
/*
* if context is UNLOADED we are safe to go
*/
return 0;
case PFM_CTX_ZOMBIE:
/*
* no command can operate on a zombie context
*/
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
case PFM_CTX_MASKED:
/*
* PMU state has been saved to software even though
* the thread may still be running.
*/
if (cmd != PFM_UNLOAD_CONTEXT) return 0;
}

/*
@@ -5532,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
int ret;

this_cpu = get_cpu();
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
if (likely(!pfm_alt_intr_handler)) {
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

start_cycles = ia64_get_itc();
start_cycles = ia64_get_itc();

ret = pfm_do_interrupt_handler(irq, arg, regs);
ret = pfm_do_interrupt_handler(irq, arg, regs);

total_cycles = ia64_get_itc();
total_cycles = ia64_get_itc();

/*
* don't measure spurious interrupts
*/
if (likely(ret == 0)) {
total_cycles -= start_cycles;
/*
* don't measure spurious interrupts
*/
if (likely(ret == 0)) {
total_cycles -= start_cycles;

if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
}
}
else {
(*pfm_alt_intr_handler->handler)(irq, arg, regs);
}

put_cpu_no_resched();
return IRQ_HANDLED;
}
@@ -6402,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
.name = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
struct pt_regs *regs;

regs = ia64_task_regs(current);

DPRINT(("called\n"));

/*
* should not be necessary but
* let's take not risk
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;

/*
* This call is required
* May cause a spurious interrupt on some processors
*/
pfm_freeze_pmu();

ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
struct pt_regs *regs;

regs = ia64_task_regs(current);

DPRINT(("called\n"));

/*
* put PMU back in state expected
* by perfmon
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;

/*
* perfmon runs with PMU unfrozen at all times
*/
pfm_unfreeze_pmu();

ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int ret, i;
int reserve_cpu;

/* some sanity checks */
if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

/* do the easy test first */
if (pfm_alt_intr_handler) return -EBUSY;

/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}

/* reserve our session */
for_each_online_cpu(reserve_cpu) {
ret = pfm_reserve_session(NULL, 1, reserve_cpu);
if (ret) goto cleanup_reserve;
}

/* save the current system wide pmu states */
ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
goto cleanup_reserve;
}

/* officially change to the alternate interrupt handler */
pfm_alt_intr_handler = hdl;

spin_unlock(&pfm_alt_install_check);

return 0;

cleanup_reserve:
for_each_online_cpu(i) {
/* don't unreserve more than we reserved */
if (i >= reserve_cpu) break;

pfm_unreserve_session(NULL, 1, i);
}

spin_unlock(&pfm_alt_install_check);

return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int i;
int ret;

if (hdl == NULL) return -EINVAL;

/* cannot remove someone else's handler! */
if (pfm_alt_intr_handler != hdl) return -EINVAL;

/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}

pfm_alt_intr_handler = NULL;

ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
}

for_each_online_cpu(i) {
pfm_unreserve_session(NULL, 1, i);
}

spin_unlock(&pfm_alt_install_check);

return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);

/*
* perfmon initialization routine, called from the initcall() table
*/
@@ -50,7 +50,7 @@
#include "sigframe.h"

void (*ia64_mark_idle)(int);
static cpumask_t cpu_idle_map;
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
ia64_do_signal(oldset, scr, in_syscall);
}

static int pal_halt = 1;
static int pal_halt = 1;
static int can_do_pal_halt = 1;

static int __init nohalt_setup(char * str)
{
pal_halt = 0;
@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
}
__setup("nohalt", nohalt_setup);

void
update_pal_halt_status(int status)
{
can_do_pal_halt = pal_halt && status;
}

/*
* We use this if we don't have any better idle routine..
*/
void
default_idle (void)
{
unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);

while (!need_resched())
if (pal_halt && !pmu_active)
if (can_do_pal_halt)
safe_halt();
else
cpu_relax();
@@ -223,20 +229,31 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */


void cpu_idle_wait(void)
{
int cpu;
cpumask_t map;
unsigned int cpu, this_cpu = get_cpu();
cpumask_t map;

for_each_online_cpu(cpu)
cpu_set(cpu, cpu_idle_map);
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
put_cpu();

wmb();
do {
ssleep(1);
cpus_and(map, cpu_idle_map, cpu_online_map);
} while (!cpus_empty(map));
cpus_clear(map);
for_each_online_cpu(cpu) {
per_cpu(cpu_idle_state, cpu) = 1;
cpu_set(cpu, map);
}

__get_cpu_var(cpu_idle_state) = 0;

wmb();
do {
ssleep(1);
for_each_online_cpu(cpu) {
if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

@@ -244,7 +261,6 @@ void __attribute__((noreturn))
cpu_idle (void)
{
void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();

/* endless idle loop with no priority at all */
while (1) {
@@ -255,12 +271,13 @@ cpu_idle (void)
while (!need_resched()) {
void (*idle)(void);

if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;

rmb();
if (mark_idle)
(*mark_idle)(1);

if (cpu_isset(cpu, cpu_idle_map))
cpu_clear(cpu, cpu_idle_map);
rmb();
idle = pm_idle;
if (!idle)
idle = default_idle;
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

/*
* Prevent migrating this task while
* we're fiddling with the FPU state
*/
preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
preempt_enable();
}

/*
@@ -692,16 +698,30 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
unsigned long cfm)
{
struct unw_frame_info info, prev_info;
unsigned long ip, pr;
unsigned long ip, sp, pr;

unw_init_from_blocked_task(&info, child);
while (1) {
prev_info = info;
if (unw_unwind(&info) < 0)
return;
if (unw_get_rp(&info, &ip) < 0)

unw_get_sp(&info, &sp);
if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
dprintk("ptrace.%s: ran off the top of the kernel "
"stack\n", __FUNCTION__);
return;
if (ip < FIXADDR_USER_END)
}
if (unw_get_pr (&prev_info, &pr) < 0) {
unw_get_rp(&prev_info, &ip);
dprintk("ptrace.%s: failed to read "
"predicate register (ip=0x%lx)\n",
__FUNCTION__, ip);
return;
}
if (unw_is_intr_frame(&info)
&& (pr & (1UL << PRED_USER_STACK)))
break;
}
@@ -1616,20 +1636,25 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
long syscall;

if (unlikely(current->audit_context)) {
if (IS_IA32_PROCESS(&regs))
syscall = regs.r1;
else
syscall = regs.r15;

audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3);
}

if (test_thread_flag(TIF_SYSCALL_TRACE)
if (test_thread_flag(TIF_SYSCALL_TRACE)
&& (current->ptrace & PT_PTRACED))
syscall_trace();

if (unlikely(current->audit_context)) {
long syscall;
int arch;

if (IS_IA32_PROCESS(&regs)) {
syscall = regs.r1;
arch = AUDIT_ARCH_I386;
} else {
syscall = regs.r15;
arch = AUDIT_ARCH_IA64;
}

audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3);
}

}

/* "asmlinkage" so the input arguments are preserved... */
@@ -1640,7 +1665,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
struct pt_regs regs)
{
if (unlikely(current->audit_context))
audit_syscall_exit(current, regs.r8);
audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8);

if (test_thread_flag(TIF_SYSCALL_TRACE)
&& (current->ptrace & PT_PTRACED))
@@ -720,7 +720,8 @@ cpu_init (void)
ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

/*
* Initialize default control register to defer all speculative faults. The
* Initialize default control register to defer speculative faults except
* for those arising from TLB misses, which are not deferred. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps
@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
* could be corrupted.
*/
retval = (long) &ia64_leave_kernel;
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SYSCALL_AUDIT))
/*
* strace expects to be notified after sigreturn returns even though the
* context to which we return may not be in the middle of a syscall.
@@ -624,7 +624,7 @@ static struct {
__u16 thread_id;
__u16 proc_fixed_addr;
__u8 valid;
}mt_info[NR_CPUS] __devinit;
} mt_info[NR_CPUS] __devinitdata;

#ifdef CONFIG_HOTPLUG_CPU
static inline void
@@ -182,13 +182,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
}
}

/*
* A zero mmap always succeeds in Linux, independent of whether or not the
* remaining arguments are valid.
*/
if (len == 0)
goto out;

/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len || len > TASK_SIZE) {
@@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo;
int sig, code;

/* break.b always sets cr.iim to 0, which causes problems for
* debuggers. Get the real break number from the original instruction,
* but only for kernel code. User space break.b is left alone, to
* preserve the existing behaviour. All break codings have the same
* format, so there is no need to check the slot type.
*/
if (break_num == 0 && !user_mode(regs)) {
struct ia64_psr *ipsr = ia64_psr(regs);
unsigned long *bundle = (unsigned long *)regs->cr_iip;
unsigned long slot;
switch (ipsr->ri) {
case 0: slot = (bundle[0] >> 5); break;
case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
default: slot = (bundle[1] >> 23); break;
}
break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}

/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num;
@@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)

/* first, grant user-level access to fph partition: */
psr->dfh = 0;

/*
* Make sure that no other task gets in on this processor
* while we're claiming the FPU
*/
preempt_disable();
#ifndef CONFIG_SMP
{
struct task_struct *fpu_owner
= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

if (ia64_is_local_fpu_owner(current))
if (ia64_is_local_fpu_owner(current)) {
preempt_enable_no_resched();
return;
}

if (fpu_owner)
ia64_flush_fph(fpu_owner);
@@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
*/
psr->mfh = 1;
}
preempt_enable_no_resched();
}

static inline int