Merge linux-2.6 with linux-acpi-2.6

Len Brown
2005-09-08 01:45:47 -04:00
Current commit 64e47488c9
1968 changed files with 58374 additions and 48994 deletions

View File

@@ -408,6 +408,11 @@ config GENERIC_IRQ_PROBE
bool
default y
config GENERIC_PENDING_IRQ
bool
depends on GENERIC_HARDIRQS && SMP
default y
source "arch/ia64/hp/sim/Kconfig"
source "arch/ia64/oprofile/Kconfig"

View File

@@ -130,7 +130,7 @@ static void rs_stop(struct tty_struct *tty)
static void rs_start(struct tty_struct *tty)
{
#if SIMSERIAL_DEBUG
#ifdef SIMSERIAL_DEBUG
printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
tty->stopped, tty->hw_stopped, tty->flow_stopped);
#endif
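
Aside (illustrative, not part of the commit): the change above swaps #if SIMSERIAL_DEBUG for #ifdef SIMSERIAL_DEBUG. The two preprocessor forms differ when the macro is undefined or defined to 0: #if tests the macro's value (an undefined name silently evaluates to 0), while #ifdef only tests whether the name is defined at all. A minimal stand-alone sketch:

/* Illustrative only -- not kernel code. Toggle the #define to compare. */
#include <stdio.h>

/* #define SIMSERIAL_DEBUG 0 */

int main(void)
{
#if defined(SIMSERIAL_DEBUG) && SIMSERIAL_DEBUG
	printf("#if path: taken only when SIMSERIAL_DEBUG is defined and nonzero\n");
#endif
#ifdef SIMSERIAL_DEBUG
	printf("#ifdef path: taken whenever SIMSERIAL_DEBUG is defined, even as 0\n");
#endif
	return 0;
}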

View File

@@ -215,7 +215,7 @@ ia32_syscall_table:
data8 sys32_fork
data8 sys_read
data8 sys_write
data8 sys32_open /* 5 */
data8 compat_sys_open /* 5 */
data8 sys_close
data8 sys32_waitpid
data8 sys_creat

View File

@@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk)
return ret;
}
/*
* Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
*/
asmlinkage long
sys32_open (const char __user * filename, int flags, int mode)
{
char * tmp;
int fd, error;
tmp = getname(filename);
fd = PTR_ERR(tmp);
if (!IS_ERR(tmp)) {
fd = get_unused_fd();
if (fd >= 0) {
struct file *f = filp_open(tmp, flags, mode);
error = PTR_ERR(f);
if (IS_ERR(f))
goto out_error;
fd_install(fd, f);
}
out:
putname(tmp);
}
return fd;
out_error:
put_unused_fd(fd);
fd = error;
goto out;
}
/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{

View File

@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o

View File

@@ -1,396 +0,0 @@
/*
* arch/ia64/kernel/domain.c
* Architecture specific sched-domains builder.
*
* Copyright (C) 2004 Jesse Barnes
* Copyright (C) 2004 Silicon Graphics, Inc.
*/
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#define SD_NODES_PER_DOMAIN 16
#ifdef CONFIG_NUMA
/**
* find_next_best_node - find the next node to include in a sched_domain
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
* Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
*/
static int find_next_best_node(int node, unsigned long *used_nodes)
{
int i, n, val, min_val, best_node = 0;
min_val = INT_MAX;
for (i = 0; i < MAX_NUMNODES; i++) {
/* Start at @node */
n = (node + i) % MAX_NUMNODES;
if (!nr_cpus_node(n))
continue;
/* Skip already used nodes */
if (test_bit(n, used_nodes))
continue;
/* Simple min distance search */
val = node_distance(node, n);
if (val < min_val) {
min_val = val;
best_node = n;
}
}
set_bit(best_node, used_nodes);
return best_node;
}
/**
* sched_domain_node_span - get a cpumask for a node's sched_domain
* @node: node whose cpumask we're constructing
* @size: number of nodes to include in this span
*
* Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
static cpumask_t sched_domain_node_span(int node)
{
int i;
cpumask_t span, nodemask;
DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
cpus_clear(span);
bitmap_zero(used_nodes, MAX_NUMNODES);
nodemask = node_to_cpumask(node);
cpus_or(span, span, nodemask);
set_bit(node, used_nodes);
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
int next_node = find_next_best_node(node, used_nodes);
nodemask = node_to_cpumask(next_node);
cpus_or(span, span, nodemask);
}
return span;
}
#endif
/*
* At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
* can switch it on easily if needed.
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static struct sched_group sched_group_cpus[NR_CPUS];
static int cpu_to_cpu_group(int cpu)
{
return cpu;
}
#endif
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static struct sched_group sched_group_phys[NR_CPUS];
static int cpu_to_phys_group(int cpu)
{
#ifdef CONFIG_SCHED_SMT
return first_cpu(cpu_sibling_map[cpu]);
#else
return cpu;
#endif
}
#ifdef CONFIG_NUMA
/*
* The init_sched_build_groups can't handle what we want to do with node
* groups, so roll our own. Now each node has its own list of groups which
* gets dynamically allocated.
*/
static DEFINE_PER_CPU(struct sched_domain, node_domains);
static struct sched_group *sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
static struct sched_group sched_group_allnodes[MAX_NUMNODES];
static int cpu_to_allnodes_group(int cpu)
{
return cpu_to_node(cpu);
}
#endif
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
void build_sched_domains(const cpumask_t *cpu_map)
{
int i;
/*
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu_mask(i, *cpu_map) {
int group;
struct sched_domain *sd = NULL, *p;
cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
cpus_and(nodemask, nodemask, *cpu_map);
#ifdef CONFIG_NUMA
if (num_online_cpus()
> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
sd = &per_cpu(allnodes_domains, i);
*sd = SD_ALLNODES_INIT;
sd->span = *cpu_map;
group = cpu_to_allnodes_group(i);
sd->groups = &sched_group_allnodes[group];
p = sd;
} else
p = NULL;
sd = &per_cpu(node_domains, i);
*sd = SD_NODE_INIT;
sd->span = sched_domain_node_span(cpu_to_node(i));
sd->parent = p;
cpus_and(sd->span, sd->span, *cpu_map);
#endif
p = sd;
sd = &per_cpu(phys_domains, i);
group = cpu_to_phys_group(i);
*sd = SD_CPU_INIT;
sd->span = nodemask;
sd->parent = p;
sd->groups = &sched_group_phys[group];
#ifdef CONFIG_SCHED_SMT
p = sd;
sd = &per_cpu(cpu_domains, i);
group = cpu_to_cpu_group(i);
*sd = SD_SIBLING_INIT;
sd->span = cpu_sibling_map[i];
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
sd->groups = &sched_group_cpus[group];
#endif
}
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu_mask(i, *cpu_map) {
cpumask_t this_sibling_map = cpu_sibling_map[i];
cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
if (i != first_cpu(this_sibling_map))
continue;
init_sched_build_groups(sched_group_cpus, this_sibling_map,
&cpu_to_cpu_group);
}
#endif
/* Set up physical groups */
for (i = 0; i < MAX_NUMNODES; i++) {
cpumask_t nodemask = node_to_cpumask(i);
cpus_and(nodemask, nodemask, *cpu_map);
if (cpus_empty(nodemask))
continue;
init_sched_build_groups(sched_group_phys, nodemask,
&cpu_to_phys_group);
}
#ifdef CONFIG_NUMA
init_sched_build_groups(sched_group_allnodes, *cpu_map,
&cpu_to_allnodes_group);
for (i = 0; i < MAX_NUMNODES; i++) {
/* Set up node groups */
struct sched_group *sg, *prev;
cpumask_t nodemask = node_to_cpumask(i);
cpumask_t domainspan;
cpumask_t covered = CPU_MASK_NONE;
int j;
cpus_and(nodemask, nodemask, *cpu_map);
if (cpus_empty(nodemask))
continue;
domainspan = sched_domain_node_span(i);
cpus_and(domainspan, domainspan, *cpu_map);
sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
sched_group_nodes[i] = sg;
for_each_cpu_mask(j, nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
sd->groups = sg;
if (sd->groups == NULL) {
/* Turn off balancing if we have no groups */
sd->flags = 0;
}
}
if (!sg) {
printk(KERN_WARNING
"Can not alloc domain group for node %d\n", i);
continue;
}
sg->cpu_power = 0;
sg->cpumask = nodemask;
cpus_or(covered, covered, nodemask);
prev = sg;
for (j = 0; j < MAX_NUMNODES; j++) {
cpumask_t tmp, notcovered;
int n = (i + j) % MAX_NUMNODES;
cpus_complement(notcovered, covered);
cpus_and(tmp, notcovered, *cpu_map);
cpus_and(tmp, tmp, domainspan);
if (cpus_empty(tmp))
break;
nodemask = node_to_cpumask(n);
cpus_and(tmp, tmp, nodemask);
if (cpus_empty(tmp))
continue;
sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
if (!sg) {
printk(KERN_WARNING
"Can not alloc domain group for node %d\n", j);
break;
}
sg->cpu_power = 0;
sg->cpumask = tmp;
cpus_or(covered, covered, tmp);
prev->next = sg;
prev = sg;
}
prev->next = sched_group_nodes[i];
}
#endif
/* Calculate CPU power for physical packages and nodes */
for_each_cpu_mask(i, *cpu_map) {
int power;
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
power = SCHED_LOAD_SCALE;
sd->groups->cpu_power = power;
#endif
sd = &per_cpu(phys_domains, i);
power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
(cpus_weight(sd->groups->cpumask)-1) / 10;
sd->groups->cpu_power = power;
#ifdef CONFIG_NUMA
sd = &per_cpu(allnodes_domains, i);
if (sd->groups) {
power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
(cpus_weight(sd->groups->cpumask)-1) / 10;
sd->groups->cpu_power = power;
}
#endif
}
#ifdef CONFIG_NUMA
for (i = 0; i < MAX_NUMNODES; i++) {
struct sched_group *sg = sched_group_nodes[i];
int j;
if (sg == NULL)
continue;
next_sg:
for_each_cpu_mask(j, sg->cpumask) {
struct sched_domain *sd;
int power;
sd = &per_cpu(phys_domains, j);
if (j != first_cpu(sd->groups->cpumask)) {
/*
* Only add "power" once for each
* physical package.
*/
continue;
}
power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
(cpus_weight(sd->groups->cpumask)-1) / 10;
sg->cpu_power += power;
}
sg = sg->next;
if (sg != sched_group_nodes[i])
goto next_sg;
}
#endif
/* Attach the domains */
for_each_cpu_mask(i, *cpu_map) {
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
#else
sd = &per_cpu(phys_domains, i);
#endif
cpu_attach_domain(sd, i);
}
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
*/
void arch_init_sched_domains(const cpumask_t *cpu_map)
{
cpumask_t cpu_default_map;
/*
* Setup mask for cpus without special case scheduling requirements.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
build_sched_domains(&cpu_default_map);
}
void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
#ifdef CONFIG_NUMA
int i;
for (i = 0; i < MAX_NUMNODES; i++) {
cpumask_t nodemask = node_to_cpumask(i);
struct sched_group *oldsg, *sg = sched_group_nodes[i];
cpus_and(nodemask, nodemask, *cpu_map);
if (cpus_empty(nodemask))
continue;
if (sg == NULL)
continue;
sg = sg->next;
next_sg:
oldsg = sg;
sg = sg->next;
kfree(oldsg);
if (oldsg != sched_group_nodes[i])
goto next_sg;
sched_group_nodes[i] = NULL;
}
#endif
}

View File

@@ -91,23 +91,8 @@ skip:
}
#ifdef CONFIG_SMP
/*
* This is updated when the user sets irq affinity via /proc
*/
static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
/*
* Arch specific routine for deferred write to iosapic rte to reprogram
* intr destination.
*/
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
pending_irq_cpumask[irq] = mask_val;
}
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
cpumask_t mask = CPU_MASK_NONE;
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
void move_irq(int irq)
{
/* note - we hold desc->lock */
cpumask_t tmp;
irq_desc_t *desc = irq_descp(irq);
int redir = test_bit(irq, pending_irq_redir);
if (unlikely(!desc->handler->set_affinity))
return;
if (!cpus_empty(pending_irq_cpumask[irq])) {
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
if (unlikely(!cpus_empty(tmp))) {
desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
pending_irq_cpumask[irq]);
}
cpus_clear(pending_irq_cpumask[irq]);
}
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
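
The removal above drops the arch-private pending_irq_cpumask/move_irq machinery; it appears to be superseded by the generic pending-IRQ migration that the new CONFIG_GENERIC_PENDING_IRQ entry (see the Kconfig hunk earlier) switches on. As a rough illustration of the deferred-affinity pattern involved -- simplified stand-in names, not the kernel's actual API -- the idea is to record the requested mask cheaply from the /proc write path and reprogram the routing hardware only on the next interrupt:

/* Illustrative sketch only; fake types and names, not the in-kernel API. */
#define MAX_IRQS 256

struct fake_irq_desc {
	unsigned long pending_mask;   /* affinity mask requested via /proc */
	int move_pending;             /* a deferred reprogram is outstanding */
	void (*set_affinity)(int irq, unsigned long mask);
};

static struct fake_irq_desc descs[MAX_IRQS];

/* /proc write path: only record the request -- no hardware access here. */
void request_affinity(int irq, unsigned long mask)
{
	descs[irq].pending_mask = mask;
	descs[irq].move_pending = 1;
}

/* Interrupt path: apply the deferred request the next time the IRQ fires,
 * when it is safe to rewrite the routing entry (e.g. an IOSAPIC RTE). */
void apply_pending_affinity(int irq)
{
	struct fake_irq_desc *d = &descs[irq];

	if (d->move_pending && d->set_affinity) {
		d->set_affinity(irq, d->pending_mask);
		d->move_pending = 0;
	}
}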

View File

@@ -49,6 +49,7 @@
/*
* void jprobe_break(void)
*/
.section .kprobes.text, "ax"
ENTRY(jprobe_break)
break.m 0x80300
END(jprobe_break)

View File

@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = {
* is IP relative instruction and update the kprobe
* inst flag accordingly
*/
static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
unsigned long kprobe_inst, struct kprobe *p)
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
uint major_opcode,
unsigned long kprobe_inst,
struct kprobe *p)
{
p->ainsn.inst_flag = 0;
p->ainsn.target_br_reg = 0;
/* Check for Break instruction
* Bits 37:40 Major opcode to be zero
* Bits 27:32 X6 to be zero
* Bits 32:35 X3 to be zero
*/
if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
/* is a break instruction */
p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
return;
}
if (bundle_encoding[template][slot] == B) {
switch (major_opcode) {
case INDIRECT_CALL_OPCODE:
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode
* Returns 0 if supported
* Returns -EINVAL if unsupported
*/
static int unsupported_inst(uint template, uint slot, uint major_opcode,
unsigned long kprobe_inst, struct kprobe *p)
static int __kprobes unsupported_inst(uint template, uint slot,
uint major_opcode,
unsigned long kprobe_inst,
struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode,
* on which we are inserting kprobe is cmp instruction
* with ctype as unc.
*/
static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
unsigned long kprobe_inst)
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
uint major_opcode,
unsigned long kprobe_inst)
{
cmp_inst_t cmp_inst;
uint ctype_unc = 0;
@@ -201,8 +217,10 @@ out:
* In this function we override the bundle with
* the break instruction at the given slot.
*/
static void prepare_break_inst(uint template, uint slot, uint major_opcode,
unsigned long kprobe_inst, struct kprobe *p)
static void __kprobes prepare_break_inst(uint template, uint slot,
uint major_opcode,
unsigned long kprobe_inst,
struct kprobe *p)
{
unsigned long break_inst = BREAK_INST;
bundle_t *bundle = &p->ainsn.insn.bundle;
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr)
&& addr < (unsigned long)__end_ivt_text);
}
static int valid_kprobe_addr(int template, int slot, unsigned long addr)
static int __kprobes valid_kprobe_addr(int template, int slot,
unsigned long addr)
{
if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
printk(KERN_WARNING "Attempting to insert unaligned kprobe "
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void)
* - cleanup by marking the instance as unused
* - long jump back to the original return address
*/
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
struct kretprobe_instance *ri;
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
}
}
int arch_prepare_kprobe(struct kprobe *p)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long) p->addr;
unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p)
return 0;
}
void arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
void arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
void arch_remove_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p)
* to original stack address, handle the case where we need to fixup the
* relative IP address and/or fixup branch register.
*/
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
@@ -528,13 +548,16 @@ turn_ss_off:
ia64_psr(regs)->ss = 0;
}
static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
unsigned long slot = (unsigned long)p->addr & 0xf;
/* Update instruction pointer (IIP) and slot number (IPSR.ri) */
regs->cr_iip = bundle_addr & ~0xFULL;
/* single step inline if break instruction */
if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
else
regs->cr_iip = bundle_addr & ~0xFULL;
if (slot > 2)
slot = 0;
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
ia64_psr(regs)->ss = 1;
}
static int pre_kprobes_handler(struct die_args *args)
static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
unsigned int slot = ia64_psr(regs)->ri;
unsigned int template, major_opcode;
unsigned long kprobe_inst;
unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
bundle_t bundle;
memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
template = bundle.quad0.template;
/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
if (slot == 1 && bundle_encoding[template][1] == L)
slot++;
/* Get Kprobe probe instruction at given slot*/
get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
/* For break instruction,
* Bits 37:40 Major opcode to be zero
* Bits 27:32 X6 to be zero
* Bits 32:35 X3 to be zero
*/
if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
/* Not a break instruction */
return 0;
}
/* Is a break instruction */
return 1;
}
static int __kprobes pre_kprobes_handler(struct die_args *args)
{
struct kprobe *p;
int ret = 0;
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args)
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kprobe_status == KPROBE_HIT_SS) {
if ( (kprobe_status == KPROBE_HIT_SS) &&
(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
ia64_psr(regs)->ss = 0;
unlock_kprobes();
goto no_kprobe;
}
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args)
p = get_kprobe(addr);
if (!p) {
unlock_kprobes();
if (!is_ia64_break_inst(regs)) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of our break, let kernel handle it */
goto no_kprobe;
}
@@ -616,7 +686,7 @@ no_kprobe:
return ret;
}
static int post_kprobes_handler(struct pt_regs *regs)
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
if (!kprobe_running())
return 0;
@@ -641,7 +711,7 @@ out:
return 1;
}
static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
if (!kprobe_running())
return 0;
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
void *data)
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
switch(val) {
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
return NOTIFY_DONE;
}
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
*regs = jprobe_saved_regs;
return 1;
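
A small worked note on the break-instruction test used above (in update_kprobe_inst_flag() and the new is_ia64_break_inst()): the expression (kprobe_inst >> 27) & 0x1FF reads nine bits, i.e. bits 27..35 of the instruction slot, which span both the X6 (bits 27:32) and X3 (bits 32:35) fields named in the comments, while major_opcode covers bits 37:40; the slot is treated as a break only when all of them are zero. The mask arithmetic can be checked in isolation:

/* Illustrative only: verifies that the 0x1FF mask at shift 27 covers bits 27..35. */
#include <assert.h>

int main(void)
{
	unsigned long long bit27 = 1ULL << 27;
	unsigned long long bit35 = 1ULL << 35;
	unsigned long long bit36 = 1ULL << 36;

	assert(((bit27 >> 27) & 0x1FF) != 0);   /* lowest bit inside the field */
	assert(((bit35 >> 27) & 0x1FF) != 0);   /* highest bit inside the field */
	assert(((bit36 >> 27) & 0x1FF) == 0);   /* bit 36 is already outside it */
	return 0;
}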

View File

@@ -15,6 +15,7 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/fpswa.h>
#include <asm/ia32.h>
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
}
void
ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
siginfo_t siginfo;
int sig, code;
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
return rv;
}
void
void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long iim, unsigned long itir, long arg5, long arg6,
long arg7, struct pt_regs regs)

View File

@@ -48,6 +48,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.linkonce.t*)
}
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
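
The KPROBES_TEXT addition above, together with the .section .kprobes.text directives and the __kprobes markers in the earlier hunks, keeps every function involved in kprobe handling inside one dedicated text section so the kprobes core can decline to place probes on its own handlers and avoid recursive break traps. As a sketch of how such a marker is typically built -- the name below is hypothetical; only the GCC section attribute is the real mechanism:

/* Illustrative only: a hypothetical __my_kprobes marker using the same
 * GCC section-placement attribute that __kprobes relies on. */
#define __my_kprobes __attribute__((__section__(".kprobes.text")))

static int __my_kprobes my_handler(int x)
{
	/* Emitted into .kprobes.text; a linker script entry such as
	 * KPROBES_TEXT then keeps that section inside the kernel text,
	 * and its bounds can be used to reject probes on handler code. */
	return x + 1;
}

int call_it(int x)
{
	return my_handler(x);   /* keep the static function referenced */
}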

View File

@@ -20,6 +20,7 @@
*
* Note: "in0" and "in1" are preserved for debugging purposes.
*/
.section .kprobes.text,"ax"
GLOBAL_ENTRY(flush_icache_range)
.prologue

View File

@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address)
return pte_present(pte);
}
void
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
int signal = SIGSEGV, code = SEGV_MAPERR;

View File

@@ -431,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
{
struct sysdata_el *element;
element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL);
element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
if (!element) {
dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
return;

View File

@@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
{
struct cx_dev *cx_dev;
cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
DBG("cx_dev= 0x%p\n", cx_dev);
if (cx_dev == NULL)
return -ENOMEM;

View File

@@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
tioca_kern->ca_pcigart_entries =
tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_pagemap =
kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
if (!tioca_kern->ca_pcigart_pagemap) {
free_pages((unsigned long)tioca_kern->ca_gart,
get_order(tioca_kern->ca_gart_size));
@@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
* allocate a map struct
*/
ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
if (!ca_dmamap)
goto map_return;
@@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
if (!tioca_common)
return NULL;
@@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* init kernel-private area */
tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
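
The kcalloc(1, size, flags) to kzalloc(size, flags) conversions in the last few hunks are behavior-preserving: both return zeroed memory, and kzalloc is the idiomatic call when exactly one object is needed, skipping the pointless n-times-size bookkeeping of the array allocator. A user-space analogue of the same before/after, using calloc and malloc+memset as stand-ins for the kernel allocators:

/* Illustrative only; calloc/malloc+memset stand in for kcalloc/kzalloc. */
#include <stdlib.h>
#include <string.h>

struct element {
	int id;
	void *sysdata;
};

int main(void)
{
	/* Old style: an "array" of one zeroed element. */
	struct element *a = calloc(1, sizeof(*a));

	/* New style: one zeroed object, asked for directly. */
	struct element *b = malloc(sizeof(*b));
	if (b)
		memset(b, 0, sizeof(*b));

	/* Both a and b (when non-NULL) are identically zero-initialized. */
	free(a);
	free(b);
	return 0;
}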