Merge branches 'irq/sparseirq', 'irq/genirq' and 'irq/urgent'; commit 'v2.6.28' into irq/core
@@ -61,8 +61,11 @@
#include "audit.h"

/* No auditing will take place until audit_initialized != 0.
/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
 * (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
static int audit_initialized;

#define AUDIT_OFF 0

@@ -965,6 +968,9 @@ static int __init audit_init(void)
{
int i;

if (audit_initialized == AUDIT_DISABLED)
return 0;

printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled");
audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,

@@ -976,7 +982,7 @@ static int __init audit_init(void)
skb_queue_head_init(&audit_skb_queue);
skb_queue_head_init(&audit_skb_hold_queue);
audit_initialized = 1;
audit_initialized = AUDIT_INITIALIZED;
audit_enabled = audit_default;
audit_ever_enabled |= !!audit_default;

@@ -999,13 +1005,21 @@ __initcall(audit_init);
static int __init audit_enable(char *str)
{
audit_default = !!simple_strtol(str, NULL, 0);
printk(KERN_INFO "audit: %s%s\n",
audit_default ? "enabled" : "disabled",
audit_initialized ? "" : " (after initialization)");
if (audit_initialized) {
if (!audit_default)
audit_initialized = AUDIT_DISABLED;

printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");

if (audit_initialized == AUDIT_INITIALIZED) {
audit_enabled = audit_default;
audit_ever_enabled |= !!audit_default;
} else if (audit_initialized == AUDIT_UNINITIALIZED) {
printk(" (after initialization)");
} else {
printk(" (until reboot)");
}
printk("\n");

return 1;
}
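/*
 * Illustrative sketch, not part of this commit: how the audit_initialized
 * tri-state above is meant to be read.  "audit=0" on the kernel command
 * line before audit_init() runs parks the state at AUDIT_DISABLED, so
 * audit_init() bails out and audit_log_start() keeps returning NULL.
 * The helper name below is hypothetical.
 */
static inline int audit_may_log(void)
{
	return audit_initialized == AUDIT_INITIALIZED;
}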
@@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
static inline void audit_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial)
{
if (ctx)
auditsc_get_stamp(ctx, t, serial);
else {
if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
*t = CURRENT_TIME;
*serial = audit_serial();
}

@@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int reserve;
unsigned long timeout_start = jiffies;

if (!audit_initialized)
if (audit_initialized != AUDIT_INITIALIZED)
return NULL;

if (unlikely(audit_filter_type(type)))
@@ -1459,7 +1459,6 @@ void audit_free(struct task_struct *tsk)

/**
 * audit_syscall_entry - fill in an audit record at syscall entry
 * @tsk: task being audited
 * @arch: architecture type
 * @major: major syscall type (function)
 * @a1: additional syscall register 1

@@ -1548,9 +1547,25 @@ void audit_syscall_entry(int arch, int major,
context->ppid = 0;
}

void audit_finish_fork(struct task_struct *child)
{
struct audit_context *ctx = current->audit_context;
struct audit_context *p = child->audit_context;
if (!p || !ctx || !ctx->auditable)
return;
p->arch = ctx->arch;
p->major = ctx->major;
memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
p->ctime = ctx->ctime;
p->dummy = ctx->dummy;
p->auditable = ctx->auditable;
p->in_syscall = ctx->in_syscall;
p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
p->ppid = current->pid;
}

/**
 * audit_syscall_exit - deallocate audit context after a system call
 * @tsk: task being audited
 * @valid: success/failure flag
 * @return_code: syscall return value
 *

@@ -1942,15 +1957,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
 *
 * Also sets the context as auditable.
 */
void auditsc_get_stamp(struct audit_context *ctx,
int auditsc_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial)
{
if (!ctx->in_syscall)
return 0;
if (!ctx->serial)
ctx->serial = audit_serial();
t->tv_sec = ctx->ctime.tv_sec;
t->tv_nsec = ctx->ctime.tv_nsec;
*serial = ctx->serial;
ctx->auditable = 1;
return 1;
}

/* global counter which is incremented every time something logs in */
@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 * any child cgroups exist. This is theoretically supportable
 * but involves complex error handling, so it's being left until
 * later */
if (!list_empty(&cgrp->children))
if (root->number_of_cgroups > 1)
return -EBUSY;

/* Process each subsystem */

@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
if (ret == -EBUSY) {
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
goto drop_new_super;
goto free_cg_links;
}

/* EBUSY should be the only error here */

@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,

return simple_set_mnt(mnt, sb);

free_cg_links:
free_cg_links(&tmp_cg_links);
drop_new_super:
up_write(&sb->s_umount);
deactivate_super(sb);
free_cg_links(&tmp_cg_links);
return ret;
}

@@ -2934,9 +2935,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
again:
root = subsys->root;
if (root == &rootnode) {
printk(KERN_INFO
"Not cloning cgroup for unused subsystem %s\n",
subsys->name);
mutex_unlock(&cgroup_mutex);
return 0;
}
@@ -462,7 +462,7 @@ out:
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
unsigned long val = CPU_STARTING;
@@ -585,7 +585,7 @@ static int generate_sched_domains(cpumask_t **domains,
int i, j, k; /* indices for partition finding loops */
cpumask_t *doms; /* resulting partition; i.e. sched domains */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms; /* number of sched domains in result */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] cpumask_t slot */

doms = NULL;
@@ -315,17 +315,20 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = file->f_mapping;

get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);

/* insert tmp into the share list, just after mpnt */
spin_lock(&file->f_mapping->i_mmap_lock);
spin_lock(&mapping->i_mmap_lock);
if (tmp->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
tmp->vm_truncate_count = mpnt->vm_truncate_count;
flush_dcache_mmap_lock(file->f_mapping);
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_prio_tree_add(tmp, mpnt);
flush_dcache_mmap_unlock(file->f_mapping);
spin_unlock(&file->f_mapping->i_mmap_lock);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}

/*

@@ -1398,6 +1401,7 @@ long do_fork(unsigned long clone_flags,
init_completion(&vfork);
}

audit_finish_fork(p);
tracehook_report_clone(trace, regs, clone_flags, nr, p);

/*
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
@@ -40,6 +40,9 @@ unsigned long probe_irq_on(void)
 * flush such a longstanding irq before considering it as spurious.
 */
for_each_irq_desc_reverse(i, desc) {
if (!desc)
continue;

spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*

@@ -68,6 +71,9 @@ unsigned long probe_irq_on(void)
 * happened in the previous stage, it may have masked itself)
 */
for_each_irq_desc_reverse(i, desc) {
if (!desc)
continue;

spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;

@@ -86,6 +92,9 @@ unsigned long probe_irq_on(void)
 * Now filter out any obviously spurious interrupts
 */
for_each_irq_desc(i, desc) {
if (!desc)
continue;

spin_lock_irq(&desc->lock);
status = desc->status;

@@ -124,6 +133,9 @@ unsigned int probe_irq_mask(unsigned long val)
int i;

for_each_irq_desc(i, desc) {
if (!desc)
continue;

spin_lock_irq(&desc->lock);
status = desc->status;

@@ -166,6 +178,9 @@ int probe_irq_off(unsigned long val)
unsigned int status;

for_each_irq_desc(i, desc) {
if (!desc)
continue;

spin_lock_irq(&desc->lock);
status = desc->status;
@@ -24,9 +24,10 @@
 */
void dynamic_irq_init(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_desc *desc;
unsigned long flags;

desc = irq_to_desc(irq);
if (!desc) {
WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
return;

@@ -124,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
return -ENODEV;
}

type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
return 0;

@@ -352,6 +354,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)

spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);

if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;

@@ -429,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
desc = irq_remap_to_desc(irq, desc);

spin_unlock(&desc->lock);
}

@@ -465,12 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
!desc->action)) {
desc->status |= (IRQ_PENDING | IRQ_MASKED);
mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);

/* Start handling the irq */
desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);

/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;

@@ -531,8 +537,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);

if (desc->chip->eoi)
if (desc->chip->eoi) {
desc->chip->eoi(irq);
desc = irq_remap_to_desc(irq, desc);
}
}

void

@@ -567,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,

/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->chip != &no_irq_chip)
if (desc->chip != &no_irq_chip) {
mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);
}
desc->status |= IRQ_DISABLED;
desc->depth = 1;
}
@@ -15,9 +15,16 @@
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq: the interrupt number

@@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

void __init __attribute__((weak)) arch_early_irq_init(void)
{
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
.irq = -1,
.status = IRQ_DISABLED,
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
.affinity = CPU_MASK_ALL
#endif
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
unsigned long bytes;
char *ptr;
int node;

/* Compute how many bytes we need per irq and allocate them */
bytes = nr * sizeof(unsigned int);

node = cpu_to_node(cpu);
ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);

if (ptr)
desc->kstat_irqs = (unsigned int *)ptr;
}

void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
{
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
desc->irq = irq;
#ifdef CONFIG_SMP
desc->cpu = cpu;
#endif
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_kstat_irqs(desc, cpu, nr_cpu_ids);
if (!desc->kstat_irqs) {
printk(KERN_ERR "can not alloc kstat_irqs\n");
BUG_ON(1);
}
arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
.irq = -1,
.status = IRQ_DISABLED,
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
.affinity = CPU_MASK_ALL
#endif
}
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

void __init early_irq_init(void)
{
struct irq_desc *desc;
int legacy_count;
int i;

desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);

for (i = 0; i < legacy_count; i++) {
desc[i].irq = i;
desc[i].kstat_irqs = kstat_irqs_legacy[i];

irq_desc_ptrs[i] = desc + i;
}

for (i = legacy_count; i < NR_IRQS; i++)
irq_desc_ptrs[i] = NULL;

arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc;
unsigned long flags;
int node;

if (irq >= NR_IRQS) {
printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
irq, NR_IRQS);
WARN_ON(1);
return NULL;
}

desc = irq_desc_ptrs[irq];
if (desc)
return desc;

spin_lock_irqsave(&sparse_irq_lock, flags);

/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
if (desc)
goto out_unlock;

node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
irq, cpu, node);
if (!desc) {
printk(KERN_ERR "can not alloc irq_desc\n");
BUG_ON(1);
}
init_one_irq_desc(irq, desc, cpu);

irq_desc_ptrs[irq] = desc;

out_unlock:
spin_unlock_irqrestore(&sparse_irq_lock, flags);

return desc;
}
#else

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,

@@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
}
};

#endif

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themself.

@@ -179,8 +337,11 @@ unsigned int __do_IRQ(unsigned int irq)
/*
 * No locking required for CPU-local interrupts:
 */
if (desc->chip->ack)
if (desc->chip->ack) {
desc->chip->ack(irq);
/* get new one */
desc = irq_remap_to_desc(irq, desc);
}
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)

@@ -191,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
}

spin_lock(&desc->lock);
if (desc->chip->ack)
if (desc->chip->ack) {
desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);
}
/*
 * REPLAY is when Linux resends an IRQ that was dropped earlier
 * WAITING is used by probe to mark irqs that are being tested

@@ -259,19 +422,25 @@ out:
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

void early_init_irq_lock_class(void)
{
struct irq_desc *desc;
int i;

for_each_irq_desc(i, desc)
for_each_irq_desc(i, desc) {
if (!desc)
continue;

lockdep_set_class(&desc->lock, &irq_desc_lock_class);
}
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc->kstat_irqs[cpu];
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
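/*
 * Sketch of a caller, not taken from this commit: under CONFIG_SPARSE_IRQ
 * a descriptor may not exist yet, so code that can run first allocates it
 * on the handling cpu's node and only then relies on the plain lookup.
 * The function name below is hypothetical.
 */
static void example_prepare_irq(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc_alloc_cpu(irq, cpu);

	if (!desc)
		return;	/* irq >= NR_IRQS */
	/* from here on irq_to_desc(irq) returns the same descriptor */
}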
@@ -13,6 +13,11 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);

extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
extern spinlock_t sparse_irq_lock;
extern struct irq_desc *irq_desc_ptrs[NR_IRQS];

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);

@@ -25,6 +30,8 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif

extern int irq_select_affinity_usr(unsigned int irq);

/*
 * Debugging printout:
 */
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;

if (!desc->chip->set_affinity)
return -EINVAL;

spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
unsigned long flags;

spin_lock_irqsave(&desc->lock, flags);
desc->affinity = cpumask;
desc->chip->set_affinity(irq, cpumask);
spin_unlock_irqrestore(&desc->lock, flags);
} else
set_pending_irq(irq, cpumask);
} else {
desc->status |= IRQ_MOVE_PENDING;
desc->pending_mask = cpumask;
}
#else
desc->affinity = cpumask;
desc->chip->set_affinity(irq, cpumask);
#endif
desc->status |= IRQ_AFFINITY_SET;
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}

@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
/*
 * Generic version of the affinity autoselector.
 */
int irq_select_affinity(unsigned int irq)
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
{
cpumask_t mask;
struct irq_desc *desc;

if (!irq_can_set_affinity(irq))
return 0;

cpus_and(mask, cpu_online_map, irq_default_affinity);

desc = irq_to_desc(irq);
/*
 * Preserve an userspace affinity setup, but make sure that
 * one of the targets is online.
 */
if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
if (cpus_intersects(desc->affinity, cpu_online_map))
mask = desc->affinity;
else
desc->status &= ~IRQ_AFFINITY_SET;
}

desc->affinity = mask;
desc->chip->set_affinity(irq, mask);

return 0;
}
#else
static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
{
return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret;

spin_lock_irqsave(&desc->lock, flags);
ret = do_irq_select_affinity(irq, desc);
spin_unlock_irqrestore(&desc->lock, flags);

return ret;
}

#else
static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
{
return 0;
}
#endif

/**

@@ -327,21 +365,23 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 * IRQF_TRIGGER_* but the PIC does not support multiple
 * flow-types?
 */
pr_warning("No set_type function for IRQ %d (%s)\n", irq,
pr_debug("No set_type function for IRQ %d (%s)\n", irq,
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}

ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
/* caller masked out all except trigger mode flags */
ret = chip->set_type(irq, flags);

if (ret)
pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
(int)(flags & IRQF_TRIGGER_MASK),
irq, chip->set_type);
(int)flags, irq, chip->set_type);
else {
if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
flags |= IRQ_LEVEL;
/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
desc->status &= ~IRQ_TYPE_SENSE_MASK;
desc->status |= flags & IRQ_TYPE_SENSE_MASK;
desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
desc->status |= flags;
}

return ret;

@@ -421,7 +461,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)

/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq, new->flags);
ret = __irq_set_trigger(desc, irq,
new->flags & IRQF_TRIGGER_MASK);

if (ret) {
spin_unlock_irqrestore(&desc->lock, flags);

@@ -445,8 +486,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
/* Undo nested disables: */
desc->depth = 1;

/* Exclude IRQ from balancing if requested */
if (new->flags & IRQF_NOBALANCING)
desc->status |= IRQ_NO_BALANCING;

/* Set default affinity mask once everything is setup */
irq_select_affinity(irq);
do_irq_select_affinity(irq, desc);

} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)

@@ -459,10 +504,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)

*p = new;

/* Exclude IRQ from balancing */
if (new->flags & IRQF_NOBALANCING)
desc->status |= IRQ_NO_BALANCING;

/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
desc->irqs_unhandled = 0;
@@ -1,17 +1,6 @@

#include <linux/irq.h>

void set_pending_irq(unsigned int irq, cpumask_t mask)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;

spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_MOVE_PENDING;
desc->pending_mask = mask;
spin_unlock_irqrestore(&desc->lock, flags);
}

void move_masked_irq(int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
kernel/irq/numa_migrate.c (new file, 122 lines)

@@ -0,0 +1,122 @@
/*
 * NUMA irq-desc migration code
 *
 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
 * the new "home node" of the IRQ.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

static void init_copy_kstat_irqs(struct irq_desc *old_desc,
struct irq_desc *desc,
int cpu, int nr)
{
unsigned long bytes;

init_kstat_irqs(desc, cpu, nr);

if (desc->kstat_irqs != old_desc->kstat_irqs) {
/* Compute how many bytes we need per irq and allocate them */
bytes = nr * sizeof(unsigned int);

memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
}
}

static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
{
if (old_desc->kstat_irqs == desc->kstat_irqs)
return;

kfree(old_desc->kstat_irqs);
old_desc->kstat_irqs = NULL;
}

static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
struct irq_desc *desc, int cpu)
{
memcpy(desc, old_desc, sizeof(struct irq_desc));
desc->cpu = cpu;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
arch_init_copy_chip_data(old_desc, desc, cpu);
}

static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
free_kstat_irqs(old_desc, desc);
arch_free_chip_data(old_desc, desc);
}

static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
int cpu)
{
struct irq_desc *desc;
unsigned int irq;
unsigned long flags;
int node;

irq = old_desc->irq;

spin_lock_irqsave(&sparse_irq_lock, flags);

/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];

if (desc && old_desc != desc)
goto out_unlock;

node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
printk(KERN_DEBUG " move irq_desc for %d to cpu %d node %d\n",
irq, cpu, node);
if (!desc) {
printk(KERN_ERR "can not get new irq_desc for moving\n");
/* still use old one */
desc = old_desc;
goto out_unlock;
}
init_copy_one_irq_desc(irq, old_desc, desc, cpu);

irq_desc_ptrs[irq] = desc;

/* free the old one */
free_one_irq_desc(old_desc, desc);
kfree(old_desc);

out_unlock:
spin_unlock_irqrestore(&sparse_irq_lock, flags);

return desc;
}

struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
{
int old_cpu;
int node, old_node;

/* those all static, do move them */
if (desc->irq < NR_IRQS_LEGACY)
return desc;

old_cpu = desc->cpu;
printk(KERN_DEBUG
"try to move irq_desc from cpu %d to %d\n", old_cpu, cpu);
if (old_cpu != cpu) {
node = cpu_to_node(cpu);
old_node = cpu_to_node(old_cpu);
if (old_node != node)
desc = __real_move_irq_desc(desc, cpu);
else
desc->cpu = cpu;
}

return desc;
}
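/*
 * Usage sketch with a hypothetical caller, not taken from this commit:
 * move_irq_desc() either hands back the original descriptor (legacy irqs,
 * same node) or a freshly allocated copy on the target cpu's node, so
 * callers must always re-load their pointer from the return value.
 */
static struct irq_desc *example_rebind_to_cpu(struct irq_desc *desc, int cpu)
{
	/* may allocate a new copy on cpu's node and free the old one */
	return move_irq_desc(desc, cpu);
}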
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
if (!cpus_intersects(new_value, cpu_online_map))
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
return irq_select_affinity(irq) ? -EINVAL : count;
return irq_select_affinity_usr(irq) ? -EINVAL : count;

irq_set_affinity(irq, new_value);

@@ -243,7 +243,11 @@ void init_irq_proc(void)
/*
 * Create entries for all existing IRQs.
 */
for_each_irq_desc(irq, desc)
for_each_irq_desc(irq, desc) {
if (!desc)
continue;

register_irq_proc(irq, desc);
}
}
@@ -91,6 +91,9 @@ static int misrouted_irq(int irq)
int i, ok = 0;

for_each_irq_desc(i, desc) {
if (!desc)
continue;

if (!i)
continue;

@@ -112,6 +115,8 @@ static void poll_spurious_irqs(unsigned long dummy)
for_each_irq_desc(i, desc) {
unsigned int status;

if (!desc)
continue;
if (!i)
continue;
@@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v)
latency_record[i].time,
latency_record[i].max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
char sym[KSYM_NAME_LEN];
char sym[KSYM_SYMBOL_LEN];
char *c;
if (!latency_record[i].backtrace[q])
break;
@@ -3276,10 +3276,10 @@ void __init lockdep_info(void)
{
printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
@@ -167,6 +167,7 @@ static const struct tnt tnts[] = {
 * 'M' - System experienced a machine check exception.
 * 'B' - System has hit bad_page.
 * 'U' - Userspace-defined naughtiness.
 * 'D' - Kernel has oopsed before
 * 'A' - ACPI table overridden.
 * 'W' - Taint on warning.
 * 'C' - modules from drivers/staging are loaded.
@@ -311,7 +311,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_cputime cputime;

thread_group_cputime(p, &cputime);
switch (which_clock) {
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
return 0;
}

static int no_timer_create(struct k_itimer *new_timer)
{
return -EOPNOTSUPP;
}

/*
 * Return nonzero if we know a priori this clockid_t value is bogus.
 */

@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
.clock_getres = hrtimer_get_res,
.clock_get = posix_get_monotonic_raw,
.clock_set = do_posix_clock_nosettime,
.timer_create = no_timer_create,
};

register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -633,7 +633,7 @@ void swsusp_close(fmode_t mode)
return;
}

blkdev_put(resume_bdev, mode); /* move up */
blkdev_put(resume_bdev, mode);
}

static int swsusp_header_init(void)
@@ -351,7 +351,7 @@ out:
put_cpu();
}

static int __devinit profile_cpu_callback(struct notifier_block *info,
static int __cpuinit profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;

@@ -596,7 +596,7 @@ out_cleanup:
#define create_hash_tables() ({ 0; })
#endif

int create_proc_profile(void)
int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
struct proc_dir_entry *entry;
@@ -612,7 +612,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,

@@ -709,4 +709,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
unlock_kernel();
return ret;
}
#endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
#endif /* CONFIG_COMPAT */
@@ -1317,12 +1317,9 @@ static ssize_t relay_file_splice_read(struct file *in,
if (ret < 0)
break;
else if (!ret) {
if (spliced)
break;
if (flags & SPLICE_F_NONBLOCK) {
if (flags & SPLICE_F_NONBLOCK)
ret = -EAGAIN;
break;
}
break;
}

*ppos += ret;
@@ -1453,9 +1453,10 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

if (rq->nr_running)
rq->avg_load_per_task = rq->load.weight / rq->nr_running;
if (nr_running)
rq->avg_load_per_task = rq->load.weight / nr_running;
else
rq->avg_load_per_task = 0;

@@ -6586,7 +6587,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
spin_unlock_irq(&rq->lock);
complete(&req->done);
spin_lock_irq(&rq->lock);
}
spin_unlock_irq(&rq->lock);
break;
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)

/*
 * scd->clock = clamp(scd->tick_gtod + delta,
 *      max(scd->tick_gtod, scd->clock),
 *      max(scd->clock, scd->tick_gtod + TICK_NSEC));
 *      max(scd->tick_gtod, scd->clock),
 *      scd->tick_gtod + TICK_NSEC);
 */

clock = scd->tick_gtod + delta;
min_clock = wrap_max(scd->tick_gtod, scd->clock);
max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
max_clock = scd->tick_gtod + TICK_NSEC;

clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
|
||||
if ((long)(now - t->last_switch_timestamp) <
|
||||
sysctl_hung_task_timeout_secs)
|
||||
return;
|
||||
if (sysctl_hung_task_warnings < 0)
|
||||
if (!sysctl_hung_task_warnings)
|
||||
return;
|
||||
sysctl_hung_task_warnings--;
|
||||
|
||||
|
@@ -176,6 +176,9 @@ extern struct ctl_table random_table[];
#ifdef CONFIG_INOTIFY_USER
extern struct ctl_table inotify_table[];
#endif
#ifdef CONFIG_EPOLL
extern struct ctl_table epoll_table[];
#endif

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
int sysctl_legacy_va_layout;

@@ -1325,6 +1328,13 @@ static struct ctl_table fs_table[] = {
.child = inotify_table,
},
#endif
#ifdef CONFIG_EPOLL
{
.procname = "epoll",
.mode = 0555,
.child = epoll_table,
},
#endif
#endif
{
.ctl_name = KERN_SETUID_DUMPABLE,
@@ -518,6 +518,28 @@ void update_wall_time(void)
/* correct the clock when NTP error is too big */
clocksource_adjust(offset);

/*
 * Since in the loop above, we accumulate any amount of time
 * in xtime_nsec over a second into xtime.tv_sec, its possible for
 * xtime_nsec to be fairly small after the loop. Further, if we're
 * slightly speeding the clocksource up in clocksource_adjust(),
 * its possible the required corrective factor to xtime_nsec could
 * cause it to underflow.
 *
 * Now, we cannot simply roll the accumulated second back, since
 * the NTP subsystem has been notified via second_overflow. So
 * instead we push xtime_nsec forward by the amount we underflowed,
 * and add that amount into the error.
 *
 * We'll correct this error next time through this function, when
 * xtime_nsec is not as small.
 */
if (unlikely((s64)clock->xtime_nsec < 0)) {
s64 neg = -(s64)clock->xtime_nsec;
clock->xtime_nsec = 0;
clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
}

/* store full nanoseconds into xtime after rounding it up and
 * add the remainder to the error difference.
 */
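/*
 * Worked example, illustration only: if clocksource_adjust() would leave
 * clock->xtime_nsec at -5, the block above stores 0 instead and adds
 * 5 << (NTP_SCALE_SHIFT - clock->shift) to clock->error, so the missing
 * 5 shifted-nanoseconds are worked back in through the normal NTP error
 * correction on a later call.
 */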
@@ -1215,7 +1215,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,

out:
if (resched)
preempt_enable_notrace();
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
return NULL;
@@ -18,12 +18,14 @@ struct header_iter {

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;

static void mmio_reset_data(struct trace_array *tr)
{
int cpu;

overrun_detected = false;
prev_overruns = 0;
tr->time_start = ftrace_now(tr->cpu);

for_each_online_cpu(cpu)

@@ -128,16 +130,12 @@ static void mmio_close(struct trace_iterator *iter)

static unsigned long count_overruns(struct trace_iterator *iter)
{
int cpu;
unsigned long cnt = 0;
/* FIXME: */
#if 0
for_each_online_cpu(cpu) {
cnt += iter->overrun[cpu];
iter->overrun[cpu] = 0;
}
#endif
(void)cpu;
unsigned long over = ring_buffer_overruns(iter->tr->buffer);

if (over > prev_overruns)
cnt = over - prev_overruns;
prev_overruns = over;
return cnt;
}
@@ -184,11 +184,16 @@ static struct file_operations stack_max_size_fops = {
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
long i = (long)m->private;
long i;

(*pos)++;

i++;
if (v == SEQ_START_TOKEN)
i = 0;
else {
i = *(long *)v;
i++;
}

if (i >= max_stack_trace.nr_entries ||
stack_dump_trace[i] == ULONG_MAX)

@@ -201,12 +206,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

static void *t_start(struct seq_file *m, loff_t *pos)
{
void *t = &m->private;
void *t = SEQ_START_TOKEN;
loff_t l = 0;

local_irq_disable();
__raw_spin_lock(&max_stack_lock);

if (*pos == 0)
return SEQ_START_TOKEN;

for (; t && l < *pos; t = t_next(m, t, &l))
;

@@ -235,10 +243,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)

static int t_show(struct seq_file *m, void *v)
{
long i = *(long *)v;
long i;
int size;

if (i < 0) {
if (v == SEQ_START_TOKEN) {
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",

@@ -246,6 +254,8 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}

i = *(long *)v;

if (i >= max_stack_trace.nr_entries ||
stack_dump_trace[i] == ULONG_MAX)
return 0;

@@ -275,10 +285,6 @@ static int stack_trace_open(struct inode *inode, struct file *file)
int ret;

ret = seq_open(file, &stack_trace_seq_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = (void *)-1;
}

return ret;
}