Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] kdump: Add crash_save_vmcoreinfo for INIT
  [IA64] Fix NUMA configuration issue
  [IA64] Itanium Spec updates
  [IA64] Untangle sync_icache_dcache() page size determination
  [IA64] arch/ia64/kernel/: use time_* macros
  [IA64] remove redundant display of free swap space in show_mem()
  [IA64] make IOMMU respect the segment boundary limits
  [IA64] kprobes: kprobe-booster for ia64
  [IA64] fix getpid and set_tid_address fast system calls for pid namespaces
  [IA64] Replace explicit jiffies tests with time_* macros.
  [IA64] use goto to jump out do/while_each_thread
  [IA64] Fix unlock ordering in smp_callin
  [IA64] pgd_offset() constfication.
  [IA64] kdump: crash.c coding style fix
  [IA64] kdump: add kdump_on_fatal_mca
  [IA64] Minimize per_cpu reservations.
  [IA64] Correct pernodesize calculation.
  [IA64] Kernel parameter for max number of concurrent global TLB purges
  [IA64] Multiple outstanding ptc.g instruction support
  [IA64] Implement smp_call_function_mask for ia64
  ...
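Several of the commits above ("arch/ia64/kernel/: use time_* macros" and "Replace explicit jiffies tests with time_* macros") convert open-coded jiffies arithmetic to the wrap-safe time_after()/time_before() helpers; the irq_ia64.c and unaligned.c hunks further down show the actual conversions. A minimal kernel-C sketch of the resulting rate-limit pattern, with an illustrative function name and state rather than code copied from the patched files:

#include <linux/jiffies.h>

/*
 * Wrap-safe rate limiting in the style adopted by this merge: time_after()
 * compares jiffies values correctly even across a counter wrap, unlike the
 * plain "jiffies - last_time > 5*HZ" form it replaces.  Names below are
 * illustrative, not taken from the patched files.
 */
static int within_rate_limit(void)
{
	static unsigned long count, last_time;

	if (time_after(jiffies, last_time + 5 * HZ))
		count = 0;
	if (count < 5) {
		last_time = jiffies;
		count++;
		return 1;
	}
	return 0;
}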
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;
cpumask_t early_cpu_possible_map = CPU_MASK_NONE;

static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
cpu_set(srat_num_cpus, early_cpu_possible_map);
srat_num_cpus++;
}

@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
}

/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
for_each_possible_early_cpu(i)
node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

printk(KERN_INFO "Number of logical nodes in system = %d\n",
@@ -7,6 +7,7 @@
#define ASM_OFFSETS_C 1

#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/clocksource.h>

#include <asm-ia64/processor.h>
@@ -34,17 +35,29 @@ void foo(void)
DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));

BUILD_BUG_ON(sizeof(struct upid) != 32);
DEFINE(IA64_UPID_SHIFT, 5);

BLANK();

DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
#endif

BLANK();

DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
|
||||
static atomic_t kdump_cpu_frozen;
|
||||
atomic_t kdump_in_progress;
|
||||
static int kdump_on_init = 1;
|
||||
static int kdump_on_fatal_mca = 1;
|
||||
|
||||
static inline Elf64_Word
|
||||
*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
|
||||
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
|
||||
static void
|
||||
machine_kdump_on_init(void)
|
||||
{
|
||||
crash_save_vmcoreinfo();
|
||||
local_irq_disable();
|
||||
kexec_disable_iosapic();
|
||||
machine_kexec(ia64_kimage);
|
||||
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
|
||||
struct ia64_mca_notify_die *nd;
|
||||
struct die_args *args = data;
|
||||
|
||||
if (!kdump_on_init)
|
||||
if (!kdump_on_init && !kdump_on_fatal_mca)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (!ia64_kimage) {
|
||||
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (val) {
|
||||
case DIE_INIT_MONARCH_PROCESS:
|
||||
case DIE_INIT_MONARCH_PROCESS:
|
||||
if (kdump_on_init) {
|
||||
atomic_set(&kdump_in_progress, 1);
|
||||
*(nd->monarch_cpu) = -1;
|
||||
break;
|
||||
case DIE_INIT_MONARCH_LEAVE:
|
||||
}
|
||||
break;
|
||||
case DIE_INIT_MONARCH_LEAVE:
|
||||
if (kdump_on_init)
|
||||
machine_kdump_on_init();
|
||||
break;
|
||||
case DIE_INIT_SLAVE_LEAVE:
|
||||
if (atomic_read(&kdump_in_progress))
|
||||
unw_init_running(kdump_cpu_freeze, NULL);
|
||||
break;
|
||||
case DIE_MCA_RENDZVOUS_LEAVE:
|
||||
if (atomic_read(&kdump_in_progress))
|
||||
unw_init_running(kdump_cpu_freeze, NULL);
|
||||
break;
|
||||
case DIE_MCA_MONARCH_LEAVE:
|
||||
/* die_register->signr indicate if MCA is recoverable */
|
||||
if (!args->signr)
|
||||
machine_kdump_on_init();
|
||||
break;
|
||||
break;
|
||||
case DIE_INIT_SLAVE_LEAVE:
|
||||
if (atomic_read(&kdump_in_progress))
|
||||
unw_init_running(kdump_cpu_freeze, NULL);
|
||||
break;
|
||||
case DIE_MCA_RENDZVOUS_LEAVE:
|
||||
if (atomic_read(&kdump_in_progress))
|
||||
unw_init_running(kdump_cpu_freeze, NULL);
|
||||
break;
|
||||
case DIE_MCA_MONARCH_LEAVE:
|
||||
/* die_register->signr indicate if MCA is recoverable */
|
||||
if (kdump_on_fatal_mca && !args->signr) {
|
||||
atomic_set(&kdump_in_progress, 1);
|
||||
*(nd->monarch_cpu) = -1;
|
||||
machine_kdump_on_init();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static ctl_table kdump_on_init_table[] = {
|
||||
static ctl_table kdump_ctl_table[] = {
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "kdump_on_init",
|
||||
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "kdump_on_fatal_mca",
|
||||
.data = &kdump_on_fatal_mca,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
};
|
||||
|
||||
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
|
||||
.ctl_name = CTL_KERN,
|
||||
.procname = "kernel",
|
||||
.mode = 0555,
|
||||
.child = kdump_on_init_table,
|
||||
.child = kdump_ctl_table,
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
};
|
||||
|
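The crash.c hunk above registers the new kdump_on_fatal_mca knob alongside kdump_on_init in the sysctl table ("kernel" parent, integer, mode 0644, proc_dointvec handler), so on a kernel built from this merge it should appear as /proc/sys/kernel/kdump_on_fatal_mca. A hedged user-space sketch for flipping it; the path is inferred from the procname/parent entries in the hunk rather than taken from documentation:

#include <stdio.h>

/*
 * Enable or disable kdump on fatal MCA.  The sysctl path is an assumption
 * derived from the ctl_table entries in the hunk above; adjust it if the
 * running kernel exposes the knob differently.
 */
static int set_kdump_on_fatal_mca(int enable)
{
	FILE *f = fopen("/proc/sys/kernel/kdump_on_fatal_mca", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	return set_kdump_on_fatal_mca(1) ? 1 : 0;
}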
@@ -37,6 +37,7 @@
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/mca.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#define EFI_DEBUG 0
|
||||
|
||||
@@ -403,6 +404,41 @@ efi_get_pal_addr (void)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static u8 __init palo_checksum(u8 *buffer, u32 length)
|
||||
{
|
||||
u8 sum = 0;
|
||||
u8 *end = buffer + length;
|
||||
|
||||
while (buffer < end)
|
||||
sum = (u8) (sum + *(buffer++));
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse and handle PALO table which is published at:
|
||||
* http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
|
||||
*/
|
||||
static void __init handle_palo(unsigned long palo_phys)
|
||||
{
|
||||
struct palo_table *palo = __va(palo_phys);
|
||||
u8 checksum;
|
||||
|
||||
if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
|
||||
printk(KERN_INFO "PALO signature incorrect.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
checksum = palo_checksum((u8 *)palo, palo->length);
|
||||
if (checksum) {
|
||||
printk(KERN_INFO "PALO checksum incorrect.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
|
||||
}
|
||||
|
||||
void
|
||||
efi_map_pal_code (void)
|
||||
{
|
||||
@@ -432,6 +468,7 @@ efi_init (void)
|
||||
u64 efi_desc_size;
|
||||
char *cp, vendor[100] = "unknown";
|
||||
int i;
|
||||
unsigned long palo_phys;
|
||||
|
||||
/*
|
||||
* It's too early to be able to use the standard kernel command line
|
||||
@@ -496,6 +533,8 @@ efi_init (void)
|
||||
efi.hcdp = EFI_INVALID_TABLE_ADDR;
|
||||
efi.uga = EFI_INVALID_TABLE_ADDR;
|
||||
|
||||
palo_phys = EFI_INVALID_TABLE_ADDR;
|
||||
|
||||
for (i = 0; i < (int) efi.systab->nr_tables; i++) {
|
||||
if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
|
||||
efi.mps = config_tables[i].table;
|
||||
@@ -515,10 +554,17 @@ efi_init (void)
|
||||
} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
|
||||
efi.hcdp = config_tables[i].table;
|
||||
printk(" HCDP=0x%lx", config_tables[i].table);
|
||||
} else if (efi_guidcmp(config_tables[i].guid,
|
||||
PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
|
||||
palo_phys = config_tables[i].table;
|
||||
printk(" PALO=0x%lx", config_tables[i].table);
|
||||
}
|
||||
}
|
||||
printk("\n");
|
||||
|
||||
if (palo_phys != EFI_INVALID_TABLE_ADDR)
|
||||
handle_palo(palo_phys);
|
||||
|
||||
runtime = __va(efi.systab->runtime);
|
||||
efi.get_time = phys_get_time;
|
||||
efi.set_time = phys_set_time;
|
||||
|
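handle_palo() in the efi.c hunk above accepts the PALO table only when the byte-wise sum of the whole structure is zero modulo 256, then feeds max_tlb_purges to setup_ptcg_sem(). A standalone C sketch of that validation; the header layout below is an assumption for illustration (the hunk only shows the signature, length, and max_tlb_purges fields being used), not the real struct palo_table definition:

#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the PALO header fields used by handle_palo(). */
struct palo_header {
	char     signature[4];   /* assumed to be "PALO" (PALO_SIG in the hunk) */
	uint32_t length;         /* length of the whole table in bytes */
	uint8_t  checksum;       /* chosen so the byte sum of the table is 0 mod 256 */
};

/* Same additive 8-bit checksum as palo_checksum() in the hunk above. */
static uint8_t palo_checksum(const uint8_t *buf, uint32_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *buf++);
	return sum;
}

/* The table is trusted only if the signature matches and the sum of all bytes is 0. */
static int palo_table_valid(const void *table)
{
	const struct palo_header *palo = table;

	if (memcmp(palo->signature, "PALO", 4) != 0)
		return 0;
	return palo_checksum(table, palo->length) == 0;
}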
@@ -710,6 +710,16 @@ ENTRY(ia64_leave_syscall)
|
||||
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
|
||||
#endif
|
||||
.work_processed_syscall:
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
adds r2=PT(LOADRS)+16,r12
|
||||
(pUStk) mov.m r22=ar.itc // fetch time at leave
|
||||
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
|
||||
;;
|
||||
(p6) ld4 r31=[r18] // load current_thread_info()->flags
|
||||
ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
|
||||
adds r3=PT(AR_BSPSTORE)+16,r12 // deferred
|
||||
;;
|
||||
#else
|
||||
adds r2=PT(LOADRS)+16,r12
|
||||
adds r3=PT(AR_BSPSTORE)+16,r12
|
||||
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
|
||||
@@ -718,6 +728,7 @@ ENTRY(ia64_leave_syscall)
|
||||
ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
|
||||
nop.i 0
|
||||
;;
|
||||
#endif
|
||||
mov r16=ar.bsp // M2 get existing backing store pointer
|
||||
ld8 r18=[r2],PT(R9)-PT(B6) // load b6
|
||||
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
|
||||
@@ -737,12 +748,21 @@ ENTRY(ia64_leave_syscall)
|
||||
|
||||
ld8 r29=[r2],16 // M0|1 load cr.ipsr
|
||||
ld8 r28=[r3],16 // M0|1 load cr.iip
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
|
||||
;;
|
||||
ld8 r30=[r2],16 // M0|1 load cr.ifs
|
||||
ld8 r25=[r3],16 // M0|1 load ar.unat
|
||||
(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
|
||||
;;
|
||||
#else
|
||||
mov r22=r0 // A clear r22
|
||||
;;
|
||||
ld8 r30=[r2],16 // M0|1 load cr.ifs
|
||||
ld8 r25=[r3],16 // M0|1 load ar.unat
|
||||
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
|
||||
;;
|
||||
#endif
|
||||
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
|
||||
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
|
||||
nop 0
|
||||
@@ -759,7 +779,11 @@ ENTRY(ia64_leave_syscall)
|
||||
ld8.fill r1=[r3],16 // M0|1 load r1
|
||||
(pUStk) mov r17=1 // A
|
||||
;;
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
(pUStk) st1 [r15]=r17 // M2|3
|
||||
#else
|
||||
(pUStk) st1 [r14]=r17 // M2|3
|
||||
#endif
|
||||
ld8.fill r13=[r3],16 // M0|1
|
||||
mov f8=f0 // F clear f8
|
||||
;;
|
||||
@@ -775,12 +799,22 @@ ENTRY(ia64_leave_syscall)
|
||||
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
|
||||
cover // B add current frame into dirty partition & set cr.ifs
|
||||
;;
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
mov r19=ar.bsp // M2 get new backing store pointer
|
||||
st8 [r14]=r22 // M save time at leave
|
||||
mov f10=f0 // F clear f10
|
||||
|
||||
mov r22=r0 // A clear r22
|
||||
movl r14=__kernel_syscall_via_epc // X
|
||||
;;
|
||||
#else
|
||||
mov r19=ar.bsp // M2 get new backing store pointer
|
||||
mov f10=f0 // F clear f10
|
||||
|
||||
nop.m 0
|
||||
movl r14=__kernel_syscall_via_epc // X
|
||||
;;
|
||||
#endif
|
||||
mov.m ar.csd=r0 // M2 clear ar.csd
|
||||
mov.m ar.ccv=r0 // M2 clear ar.ccv
|
||||
mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
|
||||
@@ -913,10 +947,18 @@ GLOBAL_ENTRY(ia64_leave_kernel)
|
||||
adds r16=PT(CR_IPSR)+16,r12
|
||||
adds r17=PT(CR_IIP)+16,r12
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
.pred.rel.mutex pUStk,pKStk
|
||||
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
|
||||
(pUStk) mov.m r22=ar.itc // M fetch time at leave
|
||||
nop.i 0
|
||||
;;
|
||||
#else
|
||||
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
|
||||
nop.i 0
|
||||
nop.i 0
|
||||
;;
|
||||
#endif
|
||||
ld8 r29=[r16],16 // load cr.ipsr
|
||||
ld8 r28=[r17],16 // load cr.iip
|
||||
;;
|
||||
@@ -938,15 +980,37 @@ GLOBAL_ENTRY(ia64_leave_kernel)
|
||||
;;
|
||||
ld8.fill r12=[r16],16
|
||||
ld8.fill r13=[r17],16
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
|
||||
#else
|
||||
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
|
||||
#endif
|
||||
;;
|
||||
ld8 r20=[r16],16 // ar.fpsr
|
||||
ld8.fill r15=[r17],16
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
|
||||
#endif
|
||||
;;
|
||||
ld8.fill r14=[r16],16
|
||||
ld8.fill r2=[r17]
|
||||
(pUStk) mov r17=1
|
||||
;;
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
// mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;;
|
||||
// mib : mov add br -> mib : ld8 add br
|
||||
// bbb_ : br nop cover;; mbb_ : mov br cover;;
|
||||
//
|
||||
// no one require bsp in r16 if (pKStk) branch is selected.
|
||||
(pUStk) st8 [r3]=r22 // save time at leave
|
||||
(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
|
||||
shr.u r18=r19,16 // get byte size of existing "dirty" partition
|
||||
;;
|
||||
ld8.fill r3=[r16] // deferred
|
||||
LOAD_PHYS_STACK_REG_SIZE(r17)
|
||||
(pKStk) br.cond.dpnt skip_rbs_switch
|
||||
mov r16=ar.bsp // get existing backing store pointer
|
||||
#else
|
||||
ld8.fill r3=[r16]
|
||||
(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
|
||||
shr.u r18=r19,16 // get byte size of existing "dirty" partition
|
||||
@@ -954,6 +1018,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
|
||||
mov r16=ar.bsp // get existing backing store pointer
|
||||
LOAD_PHYS_STACK_REG_SIZE(r17)
|
||||
(pKStk) br.cond.dpnt skip_rbs_switch
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Restore user backing store.
|
||||
|
@@ -61,13 +61,29 @@ ENTRY(fsys_getpid)
|
||||
.prologue
|
||||
.altrp b6
|
||||
.body
|
||||
add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
|
||||
;;
|
||||
ld8 r17=[r17] // r17 = current->group_leader
|
||||
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
|
||||
;;
|
||||
ld4 r9=[r9]
|
||||
add r8=IA64_TASK_TGID_OFFSET,r16
|
||||
add r17=IA64_TASK_TGIDLINK_OFFSET,r17
|
||||
;;
|
||||
and r9=TIF_ALLWORK_MASK,r9
|
||||
ld4 r8=[r8] // r8 = current->tgid
|
||||
ld8 r17=[r17] // r17 = current->group_leader->pids[PIDTYPE_PID].pid
|
||||
;;
|
||||
add r8=IA64_PID_LEVEL_OFFSET,r17
|
||||
;;
|
||||
ld4 r8=[r8] // r8 = pid->level
|
||||
add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0]
|
||||
;;
|
||||
shl r8=r8,IA64_UPID_SHIFT
|
||||
;;
|
||||
add r17=r17,r8 // r17 = &pid->numbers[pid->level]
|
||||
;;
|
||||
ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr
|
||||
;;
|
||||
mov r17=0
|
||||
;;
|
||||
cmp.ne p8,p0=0,r9
|
||||
(p8) br.spnt.many fsys_fallback_syscall
|
||||
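The fsys_getpid rewrite above walks current->group_leader->pids[PIDTYPE_PID].pid, indexes numbers[] by pid->level (using the new IA64_UPID_SHIFT = 5, i.e. sizeof(struct upid) == 32), and returns the .nr field, so the fast path reports the tgid as seen from the task's own pid namespace. A sketch of the C logic the assembly mirrors, written against the 2.6.25-era struct pid/struct upid layout rather than taken from this merge:

#include <linux/pid.h>
#include <linux/sched.h>

/*
 * C equivalent of the fsys_getpid fast path: take the group leader's
 * PIDTYPE_PID struct pid and read the numeric id at the deepest namespace
 * level.  The field accesses correspond to IA64_TASK_GROUP_LEADER_OFFSET,
 * IA64_TASK_TGIDLINK_OFFSET, IA64_PID_LEVEL_OFFSET and IA64_PID_UPID_OFFSET
 * in the asm-offsets hunk above.
 */
static pid_t fast_getpid(struct task_struct *tsk)
{
	struct pid *pid = tsk->group_leader->pids[PIDTYPE_PID].pid;

	return pid->numbers[pid->level].nr;
}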
@@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address)
|
||||
.altrp b6
|
||||
.body
|
||||
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
|
||||
add r17=IA64_TASK_TGIDLINK_OFFSET,r16
|
||||
;;
|
||||
ld4 r9=[r9]
|
||||
tnat.z p6,p7=r32 // check argument register for being NaT
|
||||
ld8 r17=[r17] // r17 = current->pids[PIDTYPE_PID].pid
|
||||
;;
|
||||
and r9=TIF_ALLWORK_MASK,r9
|
||||
add r8=IA64_TASK_PID_OFFSET,r16
|
||||
add r8=IA64_PID_LEVEL_OFFSET,r17
|
||||
add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
|
||||
;;
|
||||
ld4 r8=[r8]
|
||||
ld4 r8=[r8] // r8 = pid->level
|
||||
add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0]
|
||||
;;
|
||||
shl r8=r8,IA64_UPID_SHIFT
|
||||
;;
|
||||
add r17=r17,r8 // r17 = &pid->numbers[pid->level]
|
||||
;;
|
||||
ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr
|
||||
;;
|
||||
cmp.ne p8,p0=0,r9
|
||||
mov r17=-1
|
||||
;;
|
||||
@@ -210,27 +236,25 @@ ENTRY(fsys_gettimeofday)
|
||||
// Note that instructions are optimized for McKinley. McKinley can
|
||||
// process two bundles simultaneously and therefore we continuously
|
||||
// try to feed the CPU two bundles and then a stop.
|
||||
//
|
||||
// Additional note that code has changed a lot. Optimization is TBD.
|
||||
// Comments begin with "?" are maybe outdated.
|
||||
tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle
|
||||
mov pr = r30,0xc000 // Set predicates according to function
|
||||
|
||||
add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
|
||||
tnat.nz p6,p0 = r31 // guard against Nat argument
|
||||
(p6) br.cond.spnt.few .fail_einval
|
||||
movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
|
||||
;;
|
||||
ld4 r2 = [r2] // process work pending flags
|
||||
movl r29 = itc_jitter_data // itc_jitter
|
||||
add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time
|
||||
ld4 r2 = [r2] // process work pending flags
|
||||
;;
|
||||
(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
|
||||
add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
|
||||
add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
|
||||
and r2 = TIF_ALLWORK_MASK,r2
|
||||
(p6) br.cond.spnt.few .fail_einval // ? deferred branch
|
||||
mov pr = r30,0xc000 // Set predicates according to function
|
||||
;;
|
||||
add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
|
||||
and r2 = TIF_ALLWORK_MASK,r2
|
||||
add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
|
||||
(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
|
||||
;;
|
||||
add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
|
||||
cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
|
||||
(p6) br.cond.spnt.many fsys_fallback_syscall
|
||||
(p6) br.cond.spnt.many fsys_fallback_syscall
|
||||
;;
|
||||
// Begin critical section
|
||||
.time_redo:
|
||||
@@ -258,7 +282,6 @@ ENTRY(fsys_gettimeofday)
|
||||
(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
|
||||
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
|
||||
(p13) ld8 r25 = [r19] // get itc_lastcycle value
|
||||
;; // ? could be removed by moving the last add upward
|
||||
ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
|
||||
;;
|
||||
ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
|
||||
@@ -285,13 +308,12 @@ ENTRY(fsys_gettimeofday)
|
||||
EX(.fail_efault, probe.w.fault r31, 3)
|
||||
xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
|
||||
;;
|
||||
// ? simulate tbit.nz.or p7,p0 = r28,0
|
||||
getf.sig r2 = f8
|
||||
mf
|
||||
;;
|
||||
ld4 r10 = [r20] // gtod_lock.sequence
|
||||
shr.u r2 = r2,r23 // shift by factor
|
||||
;; // ? overloaded 3 bundles!
|
||||
;;
|
||||
add r8 = r8,r2 // Add xtime.nsecs
|
||||
cmp4.ne p7,p0 = r28,r10
|
||||
(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
|
||||
@@ -319,9 +341,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
|
||||
EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
|
||||
(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it
|
||||
;;
|
||||
mov r8 = r0
|
||||
(p14) getf.sig r2 = f8
|
||||
;;
|
||||
mov r8 = r0
|
||||
(p14) shr.u r21 = r2, 4
|
||||
;;
|
||||
EX(.fail_efault, st8 [r31] = r9)
|
||||
@@ -660,7 +682,11 @@ GLOBAL_ENTRY(fsys_bubble_down)
|
||||
nop.i 0
|
||||
;;
|
||||
mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
mov.m r30=ar.itc // M get cycle for accounting
|
||||
#else
|
||||
nop.m 0
|
||||
#endif
|
||||
nop.i 0
|
||||
;;
|
||||
mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore
|
||||
@@ -682,6 +708,28 @@ GLOBAL_ENTRY(fsys_bubble_down)
|
||||
cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
|
||||
br.call.sptk.many b7=ia64_syscall_setup // B
|
||||
;;
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
// mov.m r30=ar.itc is called in advance
|
||||
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
|
||||
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
|
||||
;;
|
||||
ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
|
||||
ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at leave kernel
|
||||
;;
|
||||
ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
|
||||
ld8 r21=[r17] // cumulated utime
|
||||
sub r22=r19,r18 // stime before leave kernel
|
||||
;;
|
||||
st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // update stamp
|
||||
sub r18=r30,r19 // elapsed time in user mode
|
||||
;;
|
||||
add r20=r20,r22 // sum stime
|
||||
add r21=r21,r18 // sum utime
|
||||
;;
|
||||
st8 [r16]=r20 // update stime
|
||||
st8 [r17]=r21 // update utime
|
||||
;;
|
||||
#endif
|
||||
mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
|
||||
mov rp=r14 // I0 set the real return addr
|
||||
and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
|
||||
|
@@ -1002,6 +1002,26 @@ GLOBAL_ENTRY(sched_clock)
br.ret.sptk.many rp
END(sched_clock)

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;
setf.sig f9=r32
;;
xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc)
xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product
;;
getf.sig r8=f10 // (5 cyc)
getf.sig r9=f11
;;
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp
END(cycle_to_cputime)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

GLOBAL_ENTRY(start_kernel_thread)
.prologue
.save rp, r0 // this is the end of the call-chain
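cycle_to_cputime above multiplies the cycle count by the per-CPU nsec_per_cyc scale factor as a full 128-bit product (xmpy.lu/xmpy.hu give the low and high halves) and then uses shrp to pull out the 64-bit window starting at IA64_NSEC_PER_CYC_SHIFT, so the scaling never overflows. A plain-C sketch of the same arithmetic; the shift value is an assumption, as the asm only names the constant:

#include <stdint.h>

#define IA64_NSEC_PER_CYC_SHIFT 30   /* assumed value of the constant referenced by the asm */

/*
 * Same math as the cycle_to_cputime asm above: 64x64 -> 128-bit multiply by
 * nsec_per_cyc, then take the 64 bits selected by the shift.
 */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t nsec_per_cyc)
{
	unsigned __int128 prod = (unsigned __int128)cycles * nsec_per_cyc;

	return (uint64_t)(prod >> IA64_NSEC_PER_CYC_SHIFT);
}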
@@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
static unsigned char count;
static long last_time;

if (jiffies - last_time > 5*HZ)
if (time_after(jiffies, last_time + 5 * HZ))
count = 0;
if (++count < 5) {
last_time = jiffies;
@@ -805,8 +805,13 @@ ENTRY(break_fault)
|
||||
|
||||
(p8) adds r28=16,r28 // A switch cr.iip to next bundle
|
||||
(p9) adds r8=1,r8 // A increment ei to next slot
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
;;
|
||||
mov b6=r30 // I0 setup syscall handler branch reg early
|
||||
#else
|
||||
nop.i 0
|
||||
;;
|
||||
#endif
|
||||
|
||||
mov.m r25=ar.unat // M2 (5 cyc)
|
||||
dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
|
||||
@@ -817,7 +822,11 @@ ENTRY(break_fault)
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
mov.m r30=ar.itc // M get cycle for accounting
|
||||
#else
|
||||
mov b6=r30 // I0 setup syscall handler branch reg early
|
||||
#endif
|
||||
cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
|
||||
|
||||
and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
|
||||
@@ -829,6 +838,30 @@ ENTRY(break_fault)
|
||||
cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
|
||||
br.call.sptk.many b7=ia64_syscall_setup // B
|
||||
1:
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
// mov.m r30=ar.itc is called in advance, and r13 is current
|
||||
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
|
||||
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
|
||||
(pKStk) br.cond.spnt .skip_accounting // B unlikely skip
|
||||
;;
|
||||
ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp
|
||||
ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave
|
||||
;;
|
||||
ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime
|
||||
ld8 r21=[r17] // M cumulated utime
|
||||
sub r22=r19,r18 // A stime before leave
|
||||
;;
|
||||
st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp
|
||||
sub r18=r30,r19 // A elapsed time in user
|
||||
;;
|
||||
add r20=r20,r22 // A sum stime
|
||||
add r21=r21,r18 // A sum utime
|
||||
;;
|
||||
st8 [r16]=r20 // M update stime
|
||||
st8 [r17]=r21 // M update utime
|
||||
;;
|
||||
.skip_accounting:
|
||||
#endif
|
||||
mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
|
||||
nop 0
|
||||
bsw.1 // B (6 cyc) regs are saved, switch to bank 1
|
||||
@@ -928,6 +961,7 @@ END(interrupt)
|
||||
* - r27: saved ar.rsc
|
||||
* - r28: saved cr.iip
|
||||
* - r29: saved cr.ipsr
|
||||
* - r30: ar.itc for accounting (don't touch)
|
||||
* - r31: saved pr
|
||||
* - b0: original contents (to be saved)
|
||||
* On exit:
|
||||
@@ -1090,6 +1124,41 @@ END(dispatch_illegal_op_fault)
|
||||
DBG_FAULT(16)
|
||||
FAULT(16)
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
/*
|
||||
* There is no particular reason for this code to be here, other than
|
||||
* that there happens to be space here that would go unused otherwise.
|
||||
* If this fault ever gets "unreserved", simply moved the following
|
||||
* code to a more suitable spot...
|
||||
*
|
||||
* account_sys_enter is called from SAVE_MIN* macros if accounting is
|
||||
* enabled and if the macro is entered from user mode.
|
||||
*/
|
||||
ENTRY(account_sys_enter)
|
||||
// mov.m r20=ar.itc is called in advance, and r13 is current
|
||||
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
|
||||
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
|
||||
;;
|
||||
ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
|
||||
ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at left from kernel
|
||||
;;
|
||||
ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
|
||||
ld8 r21=[r17] // cumulated utime
|
||||
sub r22=r19,r18 // stime before leave kernel
|
||||
;;
|
||||
st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp
|
||||
sub r18=r20,r19 // elapsed time in user mode
|
||||
;;
|
||||
add r23=r23,r22 // sum stime
|
||||
add r21=r21,r18 // sum utime
|
||||
;;
|
||||
st8 [r16]=r23 // update stime
|
||||
st8 [r17]=r21 // update utime
|
||||
;;
|
||||
br.ret.sptk.many rp
|
||||
END(account_sys_enter)
|
||||
#endif
|
||||
|
||||
.org ia64_ivt+0x4400
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 0x4400 Entry 17 (size 64 bundles) Reserved
|
||||
|
@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
|
||||
{ u, u, u }, /* 1F */
|
||||
};
|
||||
|
||||
/* Insert a long branch code */
|
||||
static void __kprobes set_brl_inst(void *from, void *to)
|
||||
{
|
||||
s64 rel = ((s64) to - (s64) from) >> 4;
|
||||
bundle_t *brl;
|
||||
brl = (bundle_t *) ((u64) from & ~0xf);
|
||||
brl->quad0.template = 0x05; /* [MLX](stop) */
|
||||
brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */
|
||||
brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
|
||||
brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
|
||||
/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
|
||||
brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
|
||||
}
|
||||
|
||||
/*
|
||||
* In this function we check to see if the instruction
|
||||
* is IP relative instruction and update the kprobe
|
||||
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
|
||||
regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
|
||||
}
|
||||
|
||||
/* Check the instruction in the slot is break */
|
||||
static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
|
||||
{
|
||||
unsigned int major_opcode;
|
||||
unsigned int template = bundle->quad0.template;
|
||||
unsigned long kprobe_inst;
|
||||
|
||||
/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
|
||||
if (slot == 1 && bundle_encoding[template][1] == L)
|
||||
slot++;
|
||||
|
||||
/* Get Kprobe probe instruction at given slot*/
|
||||
get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
|
||||
|
||||
/* For break instruction,
|
||||
* Bits 37:40 Major opcode to be zero
|
||||
* Bits 27:32 X6 to be zero
|
||||
* Bits 32:35 X3 to be zero
|
||||
*/
|
||||
if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
|
||||
/* Not a break instruction */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Is a break instruction */
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* In this function, we check whether the target bundle modifies IP or
|
||||
* it triggers an exception. If so, it cannot be boostable.
|
||||
*/
|
||||
static int __kprobes can_boost(bundle_t *bundle, uint slot,
|
||||
unsigned long bundle_addr)
|
||||
{
|
||||
unsigned int template = bundle->quad0.template;
|
||||
|
||||
do {
|
||||
if (search_exception_tables(bundle_addr + slot) ||
|
||||
__is_ia64_break_inst(bundle, slot))
|
||||
return 0; /* exception may occur in this bundle*/
|
||||
} while ((++slot) < 3);
|
||||
template &= 0x1e;
|
||||
if (template >= 0x10 /* including B unit */ ||
|
||||
template == 0x04 /* including X unit */ ||
|
||||
template == 0x06) /* undefined */
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Prepare long jump bundle and disables other boosters if need */
|
||||
static void __kprobes prepare_booster(struct kprobe *p)
|
||||
{
|
||||
unsigned long addr = (unsigned long)p->addr & ~0xFULL;
|
||||
unsigned int slot = (unsigned long)p->addr & 0xf;
|
||||
struct kprobe *other_kp;
|
||||
|
||||
if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
|
||||
set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
|
||||
p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
|
||||
}
|
||||
|
||||
/* disables boosters in previous slots */
|
||||
for (; addr < (unsigned long)p->addr; addr++) {
|
||||
other_kp = get_kprobe((void *)addr);
|
||||
if (other_kp)
|
||||
other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
|
||||
}
|
||||
}
|
||||
|
||||
int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
||||
{
|
||||
unsigned long addr = (unsigned long) p->addr;
|
||||
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
||||
|
||||
prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
|
||||
|
||||
prepare_booster(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
|
||||
src = &p->opcode.bundle;
|
||||
|
||||
flush_icache_range((unsigned long)p->ainsn.insn,
|
||||
(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
|
||||
(unsigned long)p->ainsn.insn +
|
||||
sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
|
||||
|
||||
switch (p->ainsn.slot) {
|
||||
case 0:
|
||||
dest->quad0.slot0 = src->quad0.slot0;
|
||||
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
|
||||
void __kprobes arch_remove_kprobe(struct kprobe *p)
|
||||
{
|
||||
mutex_lock(&kprobe_mutex);
|
||||
free_insn_slot(p->ainsn.insn, 0);
|
||||
free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
}
|
||||
/*
|
||||
* We are resuming execution after a single step fault, so the pt_regs
|
||||
* structure reflects the register state after we executed the instruction
|
||||
* located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
|
||||
* located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
|
||||
* the ip to point back to the original stack address. To set the IP address
|
||||
* to original stack address, handle the case where we need to fixup the
|
||||
* relative IP address and/or fixup branch register.
|
||||
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
|
||||
if (slot == 1 && bundle_encoding[template][1] == L)
|
||||
slot = 2;
|
||||
|
||||
if (p->ainsn.inst_flag) {
|
||||
if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
|
||||
|
||||
if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
|
||||
/* Fix relative IP address */
|
||||
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
|
||||
static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int slot = ia64_psr(regs)->ri;
|
||||
unsigned int template, major_opcode;
|
||||
unsigned long kprobe_inst;
|
||||
unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
|
||||
bundle_t bundle;
|
||||
|
||||
memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
|
||||
template = bundle.quad0.template;
|
||||
|
||||
/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
|
||||
if (slot == 1 && bundle_encoding[template][1] == L)
|
||||
slot++;
|
||||
|
||||
/* Get Kprobe probe instruction at given slot*/
|
||||
get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
|
||||
|
||||
/* For break instruction,
|
||||
* Bits 37:40 Major opcode to be zero
|
||||
* Bits 27:32 X6 to be zero
|
||||
* Bits 32:35 X3 to be zero
|
||||
*/
|
||||
if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
|
||||
/* Not a break instruction */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Is a break instruction */
|
||||
return 1;
|
||||
return __is_ia64_break_inst(&bundle, slot);
|
||||
}
|
||||
|
||||
static int __kprobes pre_kprobes_handler(struct die_args *args)
|
||||
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
|
||||
return 1;
|
||||
|
||||
ss_probe:
|
||||
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
|
||||
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
|
||||
/* Boost up -- we can execute copied instructions directly */
|
||||
ia64_psr(regs)->ri = p->ainsn.slot;
|
||||
regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
|
||||
/* turn single stepping off */
|
||||
ia64_psr(regs)->ss = 0;
|
||||
|
||||
reset_current_kprobe();
|
||||
preempt_enable_no_resched();
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
prepare_ss(p, regs);
|
||||
kcb->kprobe_status = KPROBE_HIT_SS;
|
||||
return 1;
|
||||
|
@@ -69,6 +69,7 @@
|
||||
* 2007-04-27 Russ Anderson <rja@sgi.com>
|
||||
* Support multiple cpus going through OS_MCA in the same event.
|
||||
*/
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
@@ -97,6 +98,7 @@
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
#include "mca_drv.h"
|
||||
#include "entry.h"
|
||||
@@ -112,6 +114,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
|
||||
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
|
||||
DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
|
||||
DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
|
||||
DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */
|
||||
|
||||
unsigned long __per_cpu_mca[NR_CPUS];
|
||||
|
||||
@@ -293,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void)
|
||||
if (mlogbuf_finished)
|
||||
return;
|
||||
|
||||
if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
|
||||
if (mlogbuf_timestamp &&
|
||||
time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
|
||||
printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
|
||||
" and the system seems to be messed up.\n");
|
||||
ia64_mlogbuf_finish(0);
|
||||
@@ -1182,6 +1186,49 @@ all_in:
|
||||
return;
|
||||
}
|
||||
|
||||
/* mca_insert_tr
|
||||
*
|
||||
* Switch rid when TR reload and needed!
|
||||
* iord: 1: itr, 2: dtr
|
||||
*
|
||||
*/
|
||||
static void mca_insert_tr(u64 iord)
|
||||
{
|
||||
|
||||
int i;
|
||||
u64 old_rr;
|
||||
struct ia64_tr_entry *p;
|
||||
unsigned long psr;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
psr = ia64_clear_ic();
|
||||
for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
|
||||
p = &__per_cpu_idtrs[cpu][iord-1][i];
|
||||
if (p->pte & 0x1) {
|
||||
old_rr = ia64_get_rr(p->ifa);
|
||||
if (old_rr != p->rr) {
|
||||
ia64_set_rr(p->ifa, p->rr);
|
||||
ia64_srlz_d();
|
||||
}
|
||||
ia64_ptr(iord, p->ifa, p->itir >> 2);
|
||||
ia64_srlz_i();
|
||||
if (iord & 0x1) {
|
||||
ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
|
||||
ia64_srlz_i();
|
||||
}
|
||||
if (iord & 0x2) {
|
||||
ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
|
||||
ia64_srlz_i();
|
||||
}
|
||||
if (old_rr != p->rr) {
|
||||
ia64_set_rr(p->ifa, old_rr);
|
||||
ia64_srlz_d();
|
||||
}
|
||||
}
|
||||
}
|
||||
ia64_set_psr(psr);
|
||||
}
|
||||
|
||||
/*
|
||||
* ia64_mca_handler
|
||||
*
|
||||
@@ -1266,16 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
|
||||
} else {
|
||||
/* Dump buffered message to console */
|
||||
ia64_mlogbuf_finish(1);
|
||||
#ifdef CONFIG_KEXEC
|
||||
atomic_set(&kdump_in_progress, 1);
|
||||
monarch_cpu = -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (__get_cpu_var(ia64_mca_tr_reload)) {
|
||||
mca_insert_tr(0x1); /*Reload dynamic itrs*/
|
||||
mca_insert_tr(0x2); /*Reload dynamic itrs*/
|
||||
}
|
||||
|
||||
if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
|
||||
== NOTIFY_STOP)
|
||||
ia64_mca_spin(__func__);
|
||||
|
||||
|
||||
if (atomic_dec_return(&mca_count) > 0) {
|
||||
int i;
|
||||
|
||||
|
@@ -219,8 +219,13 @@ ia64_reload_tr:
mov r20=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r20]=r16
GET_THIS_PADDR(r2, ia64_mca_tr_reload)
mov r18 = 1
;;
srlz.d
;;
st8 [r2] =r18
;;

done_tlb_purge_and_reload:
@@ -3,6 +3,18 @@
|
||||
|
||||
#include "entry.h"
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
/* read ar.itc in advance, and use it before leaving bank 0 */
|
||||
#define ACCOUNT_GET_STAMP \
|
||||
(pUStk) mov.m r20=ar.itc;
|
||||
#define ACCOUNT_SYS_ENTER \
|
||||
(pUStk) br.call.spnt rp=account_sys_enter \
|
||||
;;
|
||||
#else
|
||||
#define ACCOUNT_GET_STAMP
|
||||
#define ACCOUNT_SYS_ENTER
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
|
||||
* the minimum state necessary that allows us to turn psr.ic back
|
||||
@@ -122,11 +134,13 @@
|
||||
;; \
|
||||
.mem.offset 0,0; st8.spill [r16]=r2,16; \
|
||||
.mem.offset 8,0; st8.spill [r17]=r3,16; \
|
||||
ACCOUNT_GET_STAMP \
|
||||
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
|
||||
;; \
|
||||
EXTRA; \
|
||||
movl r1=__gp; /* establish kernel global pointer */ \
|
||||
;; \
|
||||
ACCOUNT_SYS_ENTER \
|
||||
bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
|
||||
;;
|
||||
|
||||
|
@@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void)
for(node=0; node < MAX_NUMNODES; node++)
cpus_clear(node_to_cpu_mask[node]);

for(cpu = 0; cpu < NR_CPUS; ++cpu) {
for_each_possible_early_cpu(cpu) {
node = -1;
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
@@ -135,10 +135,10 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)

while (offp < (s32 *) end) {
wp = (u64 *) ia64_imva((char *) offp + *offp);
wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
wp[1] = 0x0004000000000200UL;
wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
wp[3] = 0x0084006880000200UL;
wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
wp[1] = 0x0084006880000200UL;
wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
wp[3] = 0x0004000000000200UL;
ia64_fc(wp); ia64_fc(wp + 2);
++offp;
}
@@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx)

do_each_thread (g, t) {
if (t->thread.pfm_context == ctx) {
ret = 0;
break;
goto out;
}
} while_each_thread (g, t);

out:
read_unlock(&tasklist_lock);
DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
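The perfmon.c change above is the "use goto to jump out do/while_each_thread" commit: do_each_thread()/while_each_thread() expand to two nested loops (over thread-group leaders and over threads within each group), so a bare break only leaves the inner loop and the walk silently continues with the next group. A reduced sketch of the corrected pattern; the function name and return convention are illustrative, not copied from perfmon.c:

#include <linux/sched.h>

/* Returns 1 if any thread in the system uses @ctx, 0 otherwise.  The point
 * is that leaving the nested do_each_thread/while_each_thread walk early
 * needs a goto, because break only exits the inner per-group loop. */
static int task_with_context_exists(void *ctx)
{
	struct task_struct *g, *t;
	int found = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (t->thread.pfm_context == ctx) {
			found = 1;
			goto out;	/* break would only leave the inner loop */
		}
	} while_each_thread(g, t);
out:
	read_unlock(&tasklist_lock);
	return found;
}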
@@ -625,42 +625,12 @@ do_dump_fpu (struct unw_frame_info *info, void *arg)
|
||||
do_dump_task_fpu(current, info, arg);
|
||||
}
|
||||
|
||||
int
|
||||
dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
|
||||
{
|
||||
struct unw_frame_info tcore_info;
|
||||
|
||||
if (current == task) {
|
||||
unw_init_running(do_copy_regs, regs);
|
||||
} else {
|
||||
memset(&tcore_info, 0, sizeof(tcore_info));
|
||||
unw_init_from_blocked_task(&tcore_info, task);
|
||||
do_copy_task_regs(task, &tcore_info, regs);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
void
|
||||
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
|
||||
{
|
||||
unw_init_running(do_copy_regs, dst);
|
||||
}
|
||||
|
||||
int
|
||||
dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
|
||||
{
|
||||
struct unw_frame_info tcore_info;
|
||||
|
||||
if (current == task) {
|
||||
unw_init_running(do_dump_fpu, dst);
|
||||
} else {
|
||||
memset(&tcore_info, 0, sizeof(tcore_info));
|
||||
unw_init_from_blocked_task(&tcore_info, task);
|
||||
do_dump_task_fpu(task, &tcore_info, dst);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
|
||||
{
|
||||
|
(The changes to one file are not shown here because its diff is too large.)
@@ -59,6 +59,7 @@
|
||||
#include <asm/setup.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/hpsim.h>
|
||||
|
||||
@@ -176,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Similar to "filter_rsvd_memory()", but the reserved memory ranges
|
||||
* are not filtered out.
|
||||
*/
|
||||
int __init
|
||||
filter_memory(unsigned long start, unsigned long end, void *arg)
|
||||
{
|
||||
void (*func)(unsigned long, unsigned long, int);
|
||||
|
||||
#if IGNORE_PFN0
|
||||
if (start == PAGE_OFFSET) {
|
||||
printk(KERN_WARNING "warning: skipping physical page 0\n");
|
||||
start += PAGE_SIZE;
|
||||
if (start >= end)
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
func = arg;
|
||||
if (start < end)
|
||||
call_pernode_memory(__pa(start), end - start, func);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init
|
||||
sort_regions (struct rsvd_region *rsvd_region, int max)
|
||||
{
|
||||
@@ -493,6 +517,8 @@ setup_arch (char **cmdline_p)
|
||||
acpi_table_init();
|
||||
# ifdef CONFIG_ACPI_NUMA
|
||||
acpi_numa_init();
|
||||
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
|
||||
32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
|
||||
# endif
|
||||
#else
|
||||
# ifdef CONFIG_SMP
|
||||
@@ -946,9 +972,10 @@ cpu_init (void)
|
||||
#endif
|
||||
|
||||
/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
|
||||
if (ia64_pal_vm_summary(NULL, &vmi) == 0)
|
||||
if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
|
||||
max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
|
||||
else {
|
||||
setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
|
||||
} else {
|
||||
printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
|
||||
max_ctx = (1U << 15) - 1; /* use architected minimum */
|
||||
}
|
||||
|
@@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called with preemption disabled.
|
||||
*/
|
||||
static inline void
|
||||
send_IPI_mask(cpumask_t mask, int op)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
for_each_cpu_mask(cpu, mask) {
|
||||
send_IPI_single(cpu, op);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called with preemption disabled.
|
||||
*/
|
||||
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function_single);
|
||||
|
||||
/**
|
||||
* smp_call_function_mask(): Run a function on a set of other CPUs.
|
||||
* <mask> The set of cpus to run on. Must not include the current cpu.
|
||||
* <func> The function to run. This must be fast and non-blocking.
|
||||
* <info> An arbitrary pointer to pass to the function.
|
||||
* <wait> If true, wait (atomically) until function
|
||||
* has completed on other CPUs.
|
||||
*
|
||||
* Returns 0 on success, else a negative status code.
|
||||
*
|
||||
* If @wait is true, then returns once @func has returned; otherwise
|
||||
* it returns just before the target cpu calls @func.
|
||||
*
|
||||
* You must not call this function with disabled interrupts or from a
|
||||
* hardware interrupt handler or from a bottom half handler.
|
||||
*/
|
||||
int smp_call_function_mask(cpumask_t mask,
|
||||
void (*func)(void *), void *info,
|
||||
int wait)
|
||||
{
|
||||
struct call_data_struct data;
|
||||
cpumask_t allbutself;
|
||||
int cpus;
|
||||
|
||||
spin_lock(&call_lock);
|
||||
allbutself = cpu_online_map;
|
||||
cpu_clear(smp_processor_id(), allbutself);
|
||||
|
||||
cpus_and(mask, mask, allbutself);
|
||||
cpus = cpus_weight(mask);
|
||||
if (!cpus) {
|
||||
spin_unlock(&call_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Can deadlock when called with interrupts disabled */
|
||||
WARN_ON(irqs_disabled());
|
||||
|
||||
data.func = func;
|
||||
data.info = info;
|
||||
atomic_set(&data.started, 0);
|
||||
data.wait = wait;
|
||||
if (wait)
|
||||
atomic_set(&data.finished, 0);
|
||||
|
||||
call_data = &data;
|
||||
mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
|
||||
|
||||
/* Send a message to other CPUs */
|
||||
if (cpus_equal(mask, allbutself))
|
||||
send_IPI_allbutself(IPI_CALL_FUNC);
|
||||
else
|
||||
send_IPI_mask(mask, IPI_CALL_FUNC);
|
||||
|
||||
/* Wait for response */
|
||||
while (atomic_read(&data.started) != cpus)
|
||||
cpu_relax();
|
||||
|
||||
if (wait)
|
||||
while (atomic_read(&data.finished) != cpus)
|
||||
cpu_relax();
|
||||
call_data = NULL;
|
||||
|
||||
spin_unlock(&call_lock);
|
||||
return 0;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function_mask);
|
||||
|
||||
/*
|
||||
* this function sends a 'generic call function' IPI to all other CPUs
|
||||
* in the system.
|
||||
|
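The new smp_call_function_mask() above follows the contract spelled out in its comment block: the mask must not include the calling CPU, the callback must be fast and non-blocking, and the caller must not have interrupts disabled or be in interrupt context. A hedged usage sketch; the callback and counter are illustrative, not part of the patch:

#include <linux/cpumask.h>
#include <linux/smp.h>

static atomic_t pings = ATOMIC_INIT(0);   /* illustrative counter */

static void ping(void *info)              /* must be fast and non-blocking */
{
	atomic_inc(&pings);
}

/* Run ping() on every other online CPU and wait for all of them to finish. */
static int ping_other_cpus(void)
{
	cpumask_t mask;
	int ret;

	preempt_disable();                      /* keep "this CPU" stable */
	mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);    /* mask must exclude the caller */
	ret = smp_call_function_mask(mask, ping, NULL, 1);
	preempt_enable();
	return ret;
}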
@@ -400,9 +400,9 @@ smp_callin (void)
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
cpu_set(cpuid, cpu_online_map);
unlock_ipi_calllock();
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
unlock_ipi_calllock();

smp_setup_percpu_timer();
@@ -59,6 +59,84 @@ static struct clocksource clocksource_itc = {
|
||||
};
|
||||
static struct clocksource *itc_clocksource;
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
extern cputime_t cycle_to_cputime(u64 cyc);
|
||||
|
||||
/*
|
||||
* Called from the context switch with interrupts disabled, to charge all
|
||||
* accumulated times to the current process, and to prepare accounting on
|
||||
* the next process.
|
||||
*/
|
||||
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
|
||||
{
|
||||
struct thread_info *pi = task_thread_info(prev);
|
||||
struct thread_info *ni = task_thread_info(next);
|
||||
cputime_t delta_stime, delta_utime;
|
||||
__u64 now;
|
||||
|
||||
now = ia64_get_itc();
|
||||
|
||||
delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
|
||||
account_system_time(prev, 0, delta_stime);
|
||||
account_system_time_scaled(prev, delta_stime);
|
||||
|
||||
if (pi->ac_utime) {
|
||||
delta_utime = cycle_to_cputime(pi->ac_utime);
|
||||
account_user_time(prev, delta_utime);
|
||||
account_user_time_scaled(prev, delta_utime);
|
||||
}
|
||||
|
||||
pi->ac_stamp = ni->ac_stamp = now;
|
||||
ni->ac_stime = ni->ac_utime = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Account time for a transition between system, hard irq or soft irq state.
|
||||
* Note that this function is called with interrupts enabled.
|
||||
*/
|
||||
void account_system_vtime(struct task_struct *tsk)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
unsigned long flags;
|
||||
cputime_t delta_stime;
|
||||
__u64 now;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
now = ia64_get_itc();
|
||||
|
||||
delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
|
||||
account_system_time(tsk, 0, delta_stime);
|
||||
account_system_time_scaled(tsk, delta_stime);
|
||||
ti->ac_stime = 0;
|
||||
|
||||
ti->ac_stamp = now;
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from the timer interrupt handler to charge accumulated user time
|
||||
* to the current process. Must be called with interrupts disabled.
|
||||
*/
|
||||
void account_process_tick(struct task_struct *p, int user_tick)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(p);
|
||||
cputime_t delta_utime;
|
||||
|
||||
if (ti->ac_utime) {
|
||||
delta_utime = cycle_to_cputime(ti->ac_utime);
|
||||
account_user_time(p, delta_utime);
|
||||
account_user_time_scaled(p, delta_utime);
|
||||
ti->ac_utime = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
|
||||
|
||||
static irqreturn_t
|
||||
timer_interrupt (int irq, void *dev_id)
|
||||
{
|
||||
|
@@ -13,6 +13,7 @@
* 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes.
* 2001/01/17 Add support emulation of unaligned kernel accesses.
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -1290,7 +1291,7 @@ within_logging_rate_limit (void)
{
static unsigned long count, last_time;

if (jiffies - last_time > 5*HZ)
if (time_after(jiffies, last_time + 5 * HZ))
count = 0;
if (count < 5) {
last_time = jiffies;