Merge branch 'sched/urgent' into sched/core
Merge reason: avoid upcoming patch conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/cgroup.c (151 lines changed)
@@ -47,6 +47,7 @@
 #include <linux/hash.h>
 #include <linux/namei.h>
 #include <linux/smp_lock.h>
+#include <linux/pid_namespace.h>
 
 #include <asm/atomic.h>
 
@@ -734,16 +735,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
  * reference to css->refcnt. In general, this refcnt is expected to go down
  * to zero, soon.
  *
- * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex;
+ * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
  */
 DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
 
-static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
 {
-        if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+        if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
                 wake_up_all(&cgroup_rmdir_waitq);
 }
 
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
+{
+        css_get(css);
+}
+
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
+{
+        cgroup_wakeup_rmdir_waiter(css->cgroup);
+        css_put(css);
+}
+
 static int rebind_subsystems(struct cgroupfs_root *root,
                              unsigned long final_bits)
 {
@@ -960,6 +973,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
         INIT_LIST_HEAD(&cgrp->children);
         INIT_LIST_HEAD(&cgrp->css_sets);
         INIT_LIST_HEAD(&cgrp->release_list);
+        INIT_LIST_HEAD(&cgrp->pids_list);
         init_rwsem(&cgrp->pids_mutex);
 }
 static void init_cgroup_root(struct cgroupfs_root *root)
@@ -1357,7 +1371,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
          * wake up rmdir() waiter. the rmdir should fail since the cgroup
          * is no longer empty.
          */
-        cgroup_wakeup_rmdir_waiters(cgrp);
+        cgroup_wakeup_rmdir_waiter(cgrp);
         return 0;
 }
 
@@ -2201,12 +2215,30 @@ err:
         return ret;
 }
 
+/*
+ * Cache pids for all threads in the same pid namespace that are
+ * opening the same "tasks" file.
+ */
+struct cgroup_pids {
+        /* The node in cgrp->pids_list */
+        struct list_head list;
+        /* The cgroup those pids belong to */
+        struct cgroup *cgrp;
+        /* The namespace those pids belong to */
+        struct pid_namespace *ns;
+        /* Array of process ids in the cgroup */
+        pid_t *tasks_pids;
+        /* How many files are using this tasks_pids array */
+        int use_count;
+        /* Length of the current tasks_pids array */
+        int length;
+};
+
 static int cmppid(const void *a, const void *b)
 {
         return *(pid_t *)a - *(pid_t *)b;
 }
 
 /*
  * seq_file methods for the "tasks" file. The seq_file position is the
  * next pid to display; the seq_file iterator is a pointer to the pid
@@ -2221,45 +2253,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
          * after a seek to the start). Use a binary-search to find the
          * next pid to display, if any
          */
-        struct cgroup *cgrp = s->private;
+        struct cgroup_pids *cp = s->private;
+        struct cgroup *cgrp = cp->cgrp;
         int index = 0, pid = *pos;
         int *iter;
 
         down_read(&cgrp->pids_mutex);
         if (pid) {
-                int end = cgrp->pids_length;
+                int end = cp->length;
 
                 while (index < end) {
                         int mid = (index + end) / 2;
-                        if (cgrp->tasks_pids[mid] == pid) {
+                        if (cp->tasks_pids[mid] == pid) {
                                 index = mid;
                                 break;
-                        } else if (cgrp->tasks_pids[mid] <= pid)
+                        } else if (cp->tasks_pids[mid] <= pid)
                                 index = mid + 1;
                         else
                                 end = mid;
                 }
         }
         /* If we're off the end of the array, we're done */
-        if (index >= cgrp->pids_length)
+        if (index >= cp->length)
                 return NULL;
         /* Update the abstract position to be the actual pid that we found */
-        iter = cgrp->tasks_pids + index;
+        iter = cp->tasks_pids + index;
         *pos = *iter;
         return iter;
 }
 
 static void cgroup_tasks_stop(struct seq_file *s, void *v)
 {
-        struct cgroup *cgrp = s->private;
+        struct cgroup_pids *cp = s->private;
+        struct cgroup *cgrp = cp->cgrp;
         up_read(&cgrp->pids_mutex);
 }
 
 static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
 {
-        struct cgroup *cgrp = s->private;
+        struct cgroup_pids *cp = s->private;
         int *p = v;
-        int *end = cgrp->tasks_pids + cgrp->pids_length;
+        int *end = cp->tasks_pids + cp->length;
 
         /*
          * Advance to the next pid in the array. If this goes off the
@@ -2286,26 +2320,33 @@ static struct seq_operations cgroup_tasks_seq_operations = {
         .show = cgroup_tasks_show,
 };
 
-static void release_cgroup_pid_array(struct cgroup *cgrp)
+static void release_cgroup_pid_array(struct cgroup_pids *cp)
 {
+        struct cgroup *cgrp = cp->cgrp;
+
         down_write(&cgrp->pids_mutex);
-        BUG_ON(!cgrp->pids_use_count);
-        if (!--cgrp->pids_use_count) {
-                kfree(cgrp->tasks_pids);
-                cgrp->tasks_pids = NULL;
-                cgrp->pids_length = 0;
+        BUG_ON(!cp->use_count);
+        if (!--cp->use_count) {
+                list_del(&cp->list);
+                put_pid_ns(cp->ns);
+                kfree(cp->tasks_pids);
+                kfree(cp);
         }
         up_write(&cgrp->pids_mutex);
 }
 
 static int cgroup_tasks_release(struct inode *inode, struct file *file)
 {
-        struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+        struct seq_file *seq;
+        struct cgroup_pids *cp;
 
         if (!(file->f_mode & FMODE_READ))
                 return 0;
 
-        release_cgroup_pid_array(cgrp);
+        seq = file->private_data;
+        cp = seq->private;
+
+        release_cgroup_pid_array(cp);
         return seq_release(inode, file);
 }
 
@@ -2324,6 +2365,8 @@ static struct file_operations cgroup_tasks_operations = {
 static int cgroup_tasks_open(struct inode *unused, struct file *file)
 {
         struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+        struct pid_namespace *ns = current->nsproxy->pid_ns;
+        struct cgroup_pids *cp;
         pid_t *pidarray;
         int npids;
         int retval;
@@ -2350,20 +2393,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file)
          * array if necessary
          */
         down_write(&cgrp->pids_mutex);
-        kfree(cgrp->tasks_pids);
-        cgrp->tasks_pids = pidarray;
-        cgrp->pids_length = npids;
-        cgrp->pids_use_count++;
+
+        list_for_each_entry(cp, &cgrp->pids_list, list) {
+                if (ns == cp->ns)
+                        goto found;
+        }
+
+        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+        if (!cp) {
+                up_write(&cgrp->pids_mutex);
+                kfree(pidarray);
+                return -ENOMEM;
+        }
+        cp->cgrp = cgrp;
+        cp->ns = ns;
+        get_pid_ns(ns);
+        list_add(&cp->list, &cgrp->pids_list);
+found:
+        kfree(cp->tasks_pids);
+        cp->tasks_pids = pidarray;
+        cp->length = npids;
+        cp->use_count++;
         up_write(&cgrp->pids_mutex);
 
         file->f_op = &cgroup_tasks_operations;
 
         retval = seq_open(file, &cgroup_tasks_seq_operations);
         if (retval) {
-                release_cgroup_pid_array(cgrp);
+                release_cgroup_pid_array(cp);
                 return retval;
         }
-        ((struct seq_file *)file->private_data)->private = cgrp;
+        ((struct seq_file *)file->private_data)->private = cp;
         return 0;
 }
 
@@ -2695,34 +2755,43 @@ again:
         }
         mutex_unlock(&cgroup_mutex);
 
+        /*
+         * In general, subsystem has no css->refcnt after pre_destroy(). But
+         * in racy cases, subsystem may have to get css->refcnt after
+         * pre_destroy() and it makes rmdir return with -EBUSY. This sometimes
+         * make rmdir return -EBUSY too often. To avoid that, we use waitqueue
+         * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir
+         * and subsystem's reference count handling. Please see css_get/put
+         * and css_tryget() and cgroup_wakeup_rmdir_waiter() implementation.
+         */
+        set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+
         /*
          * Call pre_destroy handlers of subsys. Notify subsystems
          * that rmdir() request comes.
          */
         ret = cgroup_call_pre_destroy(cgrp);
-        if (ret)
+        if (ret) {
+                clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                 return ret;
+        }
 
         mutex_lock(&cgroup_mutex);
         parent = cgrp->parent;
         if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
+                clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                 mutex_unlock(&cgroup_mutex);
                 return -EBUSY;
         }
-        /*
-         * css_put/get is provided for subsys to grab refcnt to css. In typical
-         * case, subsystem has no reference after pre_destroy(). But, under
-         * hierarchy management, some *temporal* refcnt can be hold.
-         * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
-         * is really busy, it should return -EBUSY at pre_destroy(). wake_up
-         * is called when css_put() is called and refcnt goes down to 0.
-         */
-        set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
         prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
-
         if (!cgroup_clear_css_refs(cgrp)) {
                 mutex_unlock(&cgroup_mutex);
-                schedule();
+                /*
+                 * Because someone may call cgroup_wakeup_rmdir_waiter() before
+                 * prepare_to_wait(), we need to check this flag.
+                 */
+                if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
+                        schedule();
                 finish_wait(&cgroup_rmdir_waitq, &wait);
                 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                 if (signal_pending(current))
@@ -3294,7 +3363,7 @@ void __css_put(struct cgroup_subsys_state *css)
                         set_bit(CGRP_RELEASABLE, &cgrp->flags);
                         check_for_release(cgrp);
                 }
-                cgroup_wakeup_rmdir_waiters(cgrp);
+                cgroup_wakeup_rmdir_waiter(cgrp);
         }
         rcu_read_unlock();
 }
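The cgroup_exclude_rmdir()/cgroup_release_and_wakeup_rmdir() pair added above lets a subsystem pin a cgroup across an operation that temporarily takes css references, so a racing rmdir() waits instead of returning -EBUSY. A minimal sketch of the intended calling pattern (the surrounding function is hypothetical; only the two helpers come from this diff):

        /* Hypothetical caller in a cgroup controller. */
        static void example_subsys_op(struct cgroup_subsys_state *css)
        {
                cgroup_exclude_rmdir(css);      /* css_get(): rmdir() now waits */

                /* ... work that may briefly hold extra css references ... */

                /* css_put(), then wake any rmdir() sleeping on cgroup_rmdir_waitq */
                cgroup_release_and_wakeup_rmdir(css);
        }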
@@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
         init_rwsem(&mm->mmap_sem);
         INIT_LIST_HEAD(&mm->mmlist);
         mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
+        mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
         mm->core_state = NULL;
         mm->nr_ptes = 0;
         set_mm_counter(mm, file_rss, 0);
@@ -1407,14 +1408,11 @@ long do_fork(unsigned long clone_flags,
         if (clone_flags & CLONE_VFORK) {
                 p->vfork_done = &vfork;
                 init_completion(&vfork);
-        } else if (!(clone_flags & CLONE_VM)) {
-                /*
-                 * vfork will do an exec which will call
-                 * set_task_comm()
-                 */
-                perf_counter_fork(p);
         }
 
+        if (!(clone_flags & CLONE_THREAD))
+                perf_counter_fork(p);
+
         audit_finish_fork(p);
         tracehook_report_clone(regs, clone_flags, nr, p);
 
@@ -44,12 +44,19 @@ void refrigerator(void)
         recalc_sigpending(); /* We sent fake signal, clean it up */
         spin_unlock_irq(&current->sighand->siglock);
 
+        /* prevent accounting of that task to load */
+        current->flags |= PF_FREEZING;
+
         for (;;) {
                 set_current_state(TASK_UNINTERRUPTIBLE);
                 if (!frozen(current))
                         break;
                 schedule();
         }
 
+        /* Remove the accounting blocker */
+        current->flags &= ~PF_FREEZING;
+
         pr_debug("%s left refrigerator\n", current->comm);
         __set_current_state(save);
 }
@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
|
||||
|
||||
extern int irq_select_affinity_usr(unsigned int irq);
|
||||
|
||||
extern void
|
||||
irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
|
||||
extern void irq_set_thread_affinity(struct irq_desc *desc);
|
||||
|
||||
/*
|
||||
* Debugging printout:
|
||||
|
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
         return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc:       irq descriptor which has affinity changed
+ *
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
         struct irqaction *action = desc->action;
 
         while (action) {
                 if (action->thread)
-                        set_cpus_allowed_ptr(action->thread, cpumask);
+                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                 action = action->next;
         }
 }
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         if (desc->status & IRQ_MOVE_PCNTXT) {
                 if (!desc->chip->set_affinity(irq, cpumask)) {
                         cpumask_copy(desc->affinity, cpumask);
-                        irq_set_thread_affinity(desc, cpumask);
+                        irq_set_thread_affinity(desc);
                 }
         }
         else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 #else
         if (!desc->chip->set_affinity(irq, cpumask)) {
                 cpumask_copy(desc->affinity, cpumask);
-                irq_set_thread_affinity(desc, cpumask);
+                irq_set_thread_affinity(desc);
         }
 #endif
         desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
         spin_lock_irqsave(&desc->lock, flags);
         ret = setup_affinity(irq, desc);
         if (!ret)
-                irq_set_thread_affinity(desc, desc->affinity);
+                irq_set_thread_affinity(desc);
         spin_unlock_irqrestore(&desc->lock, flags);
 
         return ret;
@@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
         return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+        cpumask_var_t mask;
+
+        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+                return;
+
+        /*
+         * In case we are out of memory we set IRQTF_AFFINITY again and
+         * try again next time
+         */
+        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+                set_bit(IRQTF_AFFINITY, &action->thread_flags);
+                return;
+        }
+
+        spin_lock_irq(&desc->lock);
+        cpumask_copy(mask, desc->affinity);
+        spin_unlock_irq(&desc->lock);
+
+        set_cpus_allowed_ptr(current, mask);
+        free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -458,6 +499,8 @@ static int irq_thread(void *data)
 
         while (!irq_wait_for_interrupt(action)) {
 
+                irq_thread_check_affinity(desc, action);
+
                 atomic_inc(&desc->threads_active);
 
                 spin_lock_irq(&desc->lock);
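Condensed, the hunks above split an affinity change into a cheap flag update under desc->lock and the actual cpumask change deferred to the IRQ thread itself; roughly (flow sketch assembled from the patch, not code from it):

        /*
         * irq_set_affinity()              hard interrupt context, desc->lock held
         *   -> irq_set_thread_affinity(desc)
         *        set_bit(IRQTF_AFFINITY, &action->thread_flags);
         *
         * irq_thread()                    process context, may sleep
         *   -> irq_thread_check_affinity(desc, action)
         *        if (test_and_clear_bit(IRQTF_AFFINITY, ...))
         *                set_cpus_allowed_ptr(current, mask);
         */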
@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
                    < nr_cpu_ids))
                 if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
                         cpumask_copy(desc->affinity, desc->pending_mask);
-                        irq_set_thread_affinity(desc, desc->pending_mask);
+                        irq_set_thread_affinity(desc);
                 }
 
         cpumask_clear(desc->pending_mask);
@@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
         } while (*cur++ == ',');
 
         if (*crash_size > 0) {
-                while (*cur != ' ' && *cur != '@')
+                while (*cur && *cur != ' ' && *cur != '@')
                         cur++;
                 if (*cur == '@') {
                         cur++;
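The added *cur test stops the scan at the string's terminating NUL when neither ' ' nor '@' follows the size, instead of walking past the end of the command line. A stand-alone sketch of the fixed loop (user-space C; the input string is hypothetical):

        #include <stdio.h>

        int main(void)
        {
                const char *cur = "128M";       /* crashkernel= tail with no @offset */

                while (*cur && *cur != ' ' && *cur != '@')
                        cur++;                  /* now stops at the NUL, in bounds */

                if (*cur == '@')
                        printf("offset follows\n");
                else
                        printf("no offset\n");
                return 0;
        }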
@@ -694,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p)
         p->addr = addr;
 
         preempt_disable();
-        if (!__kernel_text_address((unsigned long) p->addr) ||
+        if (!kernel_text_address((unsigned long) p->addr) ||
             in_kprobes_functions((unsigned long) p->addr)) {
                 preempt_enable();
                 return -EINVAL;
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
  * @k: thread created by kthread_create().
  *
  * Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit. Your threadfn() must not call do_exit()
- * itself if you use this function! This can also be called after
- * kthread_create() instead of calling wake_up_process(): the thread
- * will exit without calling threadfn().
+ * waits for it to exit. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will exit without
+ * calling threadfn().
+ *
+ * If threadfn() may call do_exit() itself, the caller must ensure
+ * task_struct can't go away.
  *
  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
  * was never called.
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
         const unsigned long *crc;
 
-        if (!find_symbol("module_layout", NULL, &crc, true, false))
+        if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
+                         &crc, true, false))
                 BUG();
         return check_version(sechdrs, versindex, "module_layout", mod, crc);
 }
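MODULE_SYMBOL_PREFIX is the arch-defined decoration some symbol tables put in front of C names, so looking up the bare name fails on those architectures. A hedged illustration (the "_" prefix is an assumption for the example, not taken from this patch):

        /* e.g. an arch that decorates C symbols with "_" */
        #define MODULE_SYMBOL_PREFIX "_"

        /* Adjacent string literals concatenate at compile time, so the
         * lookup key becomes "_module_layout" - the name that is actually
         * present in the symbol table. */
        static const char *key = MODULE_SYMBOL_PREFIX "module_layout";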
@@ -146,6 +146,28 @@ static void put_ctx(struct perf_counter_context *ctx)
         }
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+        if (ctx->parent_ctx) {
+                put_ctx(ctx->parent_ctx);
+                ctx->parent_ctx = NULL;
+        }
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+        u64 id = counter->id;
+
+        if (counter->parent)
+                id = counter->parent->id;
+
+        return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with the fact that until it is locked,
@@ -1288,7 +1310,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1328,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
         if (!sample_period)
                 sample_period = 1;
 
-        perf_log_period(counter, sample_period);
-
         hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1482,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
         /*
          * Unclone this context if we enabled any counter.
          */
-        if (enabled && ctx->parent_ctx) {
-                put_ctx(ctx->parent_ctx);
-                ctx->parent_ctx = NULL;
-        }
+        if (enabled)
+                unclone_ctx(ctx);
 
         spin_unlock(&ctx->lock);
 
@@ -1526,7 +1543,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-        struct perf_counter_context *parent_ctx;
         struct perf_counter_context *ctx;
         struct perf_cpu_context *cpuctx;
         struct task_struct *task;
@@ -1586,11 +1602,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  retry:
         ctx = perf_lock_task_context(task, &flags);
         if (ctx) {
-                parent_ctx = ctx->parent_ctx;
-                if (parent_ctx) {
-                        put_ctx(parent_ctx);
-                        ctx->parent_ctx = NULL; /* no longer a clone */
-                }
+                unclone_ctx(ctx);
                 spin_unlock_irqrestore(&ctx->lock, flags);
         }
 
@@ -1704,7 +1716,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
                 values[n++] = counter->total_time_running +
                         atomic64_read(&counter->child_total_time_running);
         if (counter->attr.read_format & PERF_FORMAT_ID)
-                values[n++] = counter->id;
+                values[n++] = primary_counter_id(counter);
         mutex_unlock(&counter->child_mutex);
 
         if (count < n * sizeof(u64))
@@ -1811,8 +1823,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
                 counter->attr.sample_freq = value;
         } else {
-                perf_log_period(counter, value);
-
                 counter->attr.sample_period = value;
                 counter->hw.sample_period = value;
         }
@@ -2661,10 +2671,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
         if (sample_type & PERF_SAMPLE_ID)
                 header.size += sizeof(u64);
 
+        if (sample_type & PERF_SAMPLE_STREAM_ID)
+                header.size += sizeof(u64);
+
         if (sample_type & PERF_SAMPLE_CPU) {
                 header.size += sizeof(cpu_entry);
 
                 cpu_entry.cpu = raw_smp_processor_id();
                 cpu_entry.reserved = 0;
         }
 
         if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2703,7 +2717,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
         if (sample_type & PERF_SAMPLE_ADDR)
                 perf_output_put(&handle, data->addr);
 
-        if (sample_type & PERF_SAMPLE_ID)
+        if (sample_type & PERF_SAMPLE_ID) {
+                u64 id = primary_counter_id(counter);
+
+                perf_output_put(&handle, id);
+        }
+
+        if (sample_type & PERF_SAMPLE_STREAM_ID)
                 perf_output_put(&handle, counter->id);
 
         if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2746,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 if (sub != counter)
                         sub->pmu->read(sub);
 
-                group_entry.id = sub->id;
+                group_entry.id = primary_counter_id(sub);
                 group_entry.counter = atomic64_read(&sub->count);
 
                 perf_output_put(&handle, group_entry);
@@ -2786,15 +2806,8 @@ perf_counter_read_event(struct perf_counter *counter,
         }
 
         if (counter->attr.read_format & PERF_FORMAT_ID) {
-                u64 id;
-
                 event.header.size += sizeof(u64);
-                if (counter->parent)
-                        id = counter->parent->id;
-                else
-                        id = counter->id;
-
-                event.format[i++] = id;
+                event.format[i++] = primary_counter_id(counter);
         }
 
         ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2895,8 +2908,11 @@ void perf_counter_fork(struct task_struct *task)
                 .event  = {
                         .header = {
                                 .type = PERF_EVENT_FORK,
+                                .misc = 0,
                                 .size = sizeof(fork_event.event),
                         },
+                        /* .pid  */
+                        /* .ppid */
                 },
         };
 
@@ -2968,8 +2984,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
         struct perf_cpu_context *cpuctx;
         struct perf_counter_context *ctx;
         unsigned int size;
-        char *comm = comm_event->task->comm;
+        char comm[TASK_COMM_LEN];
 
+        memset(comm, 0, sizeof(comm));
+        strncpy(comm, comm_event->task->comm, sizeof(comm));
         size = ALIGN(strlen(comm)+1, sizeof(u64));
 
         comm_event->comm = comm;
@@ -3004,8 +3022,16 @@ void perf_counter_comm(struct task_struct *task)
 
         comm_event = (struct perf_comm_event){
                 .task   = task,
+                /* .comm      */
+                /* .comm_size */
                 .event  = {
-                        .header = { .type = PERF_EVENT_COMM, },
+                        .header = {
+                                .type = PERF_EVENT_COMM,
+                                .misc = 0,
+                                /* .size */
+                        },
+                        /* .pid */
+                        /* .tid */
                 },
         };
 
@@ -3088,8 +3114,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
         char *buf = NULL;
         const char *name;
 
+        memset(tmp, 0, sizeof(tmp));
+
         if (file) {
-                buf = kzalloc(PATH_MAX, GFP_KERNEL);
+                /*
+                 * d_path works from the end of the buffer backwards, so we
+                 * need to add enough zero bytes after the string to handle
+                 * the 64bit alignment we do later.
+                 */
+                buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
                 if (!buf) {
                         name = strncpy(tmp, "//enomem", sizeof(tmp));
                         goto got_name;
@@ -3100,9 +3133,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
                         goto got_name;
                 }
         } else {
-                name = arch_vma_name(mmap_event->vma);
-                if (name)
+                if (arch_vma_name(mmap_event->vma)) {
+                        name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+                                       sizeof(tmp));
                         goto got_name;
+                }
 
                 if (!vma->vm_mm) {
                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3182,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
         mmap_event = (struct perf_mmap_event){
                 .vma    = vma,
+                /* .file_name */
+                /* .file_size */
                 .event  = {
-                        .header = { .type = PERF_EVENT_MMAP, },
+                        .header = {
+                                .type = PERF_EVENT_MMAP,
+                                .misc = 0,
+                                /* .size */
+                        },
+                        /* .pid */
+                        /* .tid */
                         .start  = vma->vm_start,
                         .len    = vma->vm_end - vma->vm_start,
                         .pgoff  = vma->vm_pgoff,
@@ -3158,49 +3201,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
         perf_counter_mmap_event(&mmap_event);
 }
 
-/*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-        struct perf_event_header        header;
-        u64                             time;
-        u64                             id;
-        u64                             period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-        struct perf_output_handle handle;
-        struct freq_event event;
-        int ret;
-
-        if (counter->hw.sample_period == period)
-                return;
-
-        if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-                return;
-
-        event = (struct freq_event) {
-                .header = {
-                        .type = PERF_EVENT_PERIOD,
-                        .misc = 0,
-                        .size = sizeof(event),
-                },
-                .time = sched_clock(),
-                .id = counter->id,
-                .period = period,
-        };
-
-        ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-        if (ret)
-                return;
-
-        perf_output_put(&handle, event);
-        perf_output_end(&handle);
-}
-
 /*
  * IRQ throttle logging
  */
@@ -3214,16 +3214,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
                 struct perf_event_header        header;
                 u64                             time;
                 u64                             id;
+                u64                             stream_id;
         } throttle_event = {
                 .header = {
-                        .type = PERF_EVENT_THROTTLE + 1,
+                        .type = PERF_EVENT_THROTTLE,
                         .misc = 0,
                         .size = sizeof(throttle_event),
                 },
-                .time = sched_clock(),
-                .id = counter->id,
+                .time           = sched_clock(),
+                .id             = primary_counter_id(counter),
+                .stream_id      = counter->id,
         };
 
+        if (enable)
+                throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
         if (ret)
                 return;
@@ -3671,7 +3676,7 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id)
 {
         struct perf_sample_data data = {
-                .regs = get_irq_regs();
+                .regs = get_irq_regs(),
                 .addr = 0,
         };
 
@@ -3687,16 +3692,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-        ftrace_profile_disable(perf_event_id(&counter->attr));
+        ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-        int event_id = perf_event_id(&counter->attr);
-        int ret;
-
-        ret = ftrace_profile_enable(event_id);
-        if (ret)
+        if (ftrace_profile_enable(counter->attr.config))
                 return NULL;
 
         counter->destroy = tp_perf_counter_destroy;
@@ -4255,15 +4256,12 @@ void perf_counter_exit_task(struct task_struct *child)
          */
         spin_lock(&child_ctx->lock);
         child->perf_counter_ctxp = NULL;
-        if (child_ctx->parent_ctx) {
-                /*
-                 * This context is a clone; unclone it so it can't get
-                 * swapped to another process while we're removing all
-                 * the counters from it.
-                 */
-                put_ctx(child_ctx->parent_ctx);
-                child_ctx->parent_ctx = NULL;
-        }
+        /*
+         * If this context is a clone, unclone it so it can't get
+         * swapped to another process while we're removing all
+         * the counters from it.
+         */
+        unclone_ctx(child_ctx);
         spin_unlock(&child_ctx->lock);
         local_irq_restore(flags);
 
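With primary_counter_id() in place, PERF_FORMAT_ID and PERF_SAMPLE_ID report the parent counter's id for inherited counters, so user space can aggregate per-task clones under a single key, while the new PERF_SAMPLE_STREAM_ID still identifies the individual counter. A rough user-space view of one read() under the formats used above (struct name and layout are our sketch of the 2.6.31-era ABI, not from this patch):

        #include <stdint.h>

        struct read_format {            /* one counter, sketch */
                uint64_t value;         /* counter value */
                uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
                uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
                uint64_t id;            /* PERF_FORMAT_ID: primary (parent) id,
                                         * identical across inherited clones */
        };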
@@ -117,11 +117,12 @@ int __ref profile_init(void)
 
         cpumask_copy(prof_cpu_mask, cpu_possible_mask);
 
-        prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
+        prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
         if (prof_buffer)
                 return 0;
 
-        prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
+        prof_buffer = alloc_pages_exact(buffer_bytes,
+                                        GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
         if (prof_buffer)
                 return 0;
 
@@ -7318,6 +7318,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
         atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+        rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7544,6 +7545,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 task_rq_unlock(rq, &flags);
                 get_task_struct(p);
                 cpu_rq(cpu)->migration_thread = p;
+                rq->calc_load_update = calc_load_update;
                 break;
 
         case CPU_ONLINE:
@@ -7554,8 +7556,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 /* Update our root-domain */
                 rq = cpu_rq(cpu);
                 spin_lock_irqsave(&rq->lock, flags);
-                rq->calc_load_update = calc_load_update;
-                rq->calc_load_active = 0;
                 if (rq->rd) {
                         BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                         continue;
 
-                if (lowest_mask)
+                if (lowest_mask) {
                         cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+                        /*
+                         * We have to ensure that we have at least one bit
+                         * still set in the array, since the map could have
+                         * been concurrently emptied between the first and
+                         * second reads of vec->mask.  If we hit this
+                         * condition, simply act as though we never hit this
+                         * priority level and continue on.
+                         */
+                        if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+                                continue;
+                }
+
                 return 1;
         }
 
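The re-check closes a window in which vec->mask can be emptied between the two reads; schematically (the interleaving is hypothetical, assembled from the comment above):

        /*
         * cpupri_find()                        another CPU
         * ----------------------------         -------------------------
         * cpumask_any_and(...) finds a CPU
         *                                      last CPU leaves this prio,
         *                                      vec->mask becomes empty
         * cpumask_and(lowest_mask, ...)   ->   result can be empty
         *
         * So if no bit survived the AND, skip this priority level
         * instead of returning an empty lowest_mask to the caller.
         */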
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
         return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+                                struct sched_entity *b)
+{
+        return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         return se->vruntime - cfs_rq->min_vruntime;
@@ -605,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+        struct task_struct *tsk = NULL;
+
+        if (entity_is_task(se))
+                tsk = task_of(se);
+
         if (se->sleep_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -618,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->sleep_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                account_scheduler_latency(tsk, delta >> 10, 1);
+                if (tsk)
+                        account_scheduler_latency(tsk, delta >> 10, 1);
         }
         if (se->block_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -633,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->block_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                /*
-                 * Blocking time is in units of nanosecs, so shift by 20 to
-                 * get a milliseconds-range estimation of the amount of
-                 * time that the task spent sleeping:
-                 */
-                if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-                                     delta >> 20);
+                if (tsk) {
+                        /*
+                         * Blocking time is in units of nanosecs, so shift by
+                         * 20 to get a milliseconds-range estimation of the
+                         * amount of time that the task spent sleeping:
+                         */
+                        if (unlikely(prof_on == SLEEP_PROFILING)) {
+                                profile_hits(SLEEP_PROFILING,
+                                                (void *)get_wchan(tsk),
+                                                delta >> 20);
+                        }
+                        account_scheduler_latency(tsk, delta >> 10, 0);
                 }
-                account_scheduler_latency(tsk, delta >> 10, 0);
         }
 #endif
 }
@@ -1017,7 +1029,7 @@ static void yield_task_fair(struct rq *rq)
         /*
          * Already in the rightmost position?
          */
-        if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+        if (unlikely(!rightmost || entity_before(rightmost, se)))
                 return;
 
         /*
@@ -1713,7 +1725,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
         /* 'curr' will be NULL if the child belongs to a different group */
         if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-                        curr && curr->vruntime < se->vruntime) {
+                        curr && entity_before(curr, se)) {
                 /*
                  * Upon rescheduling, sched_class::put_prev_task() will place
                  * 'current' within the tree based on its new key value.
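entity_before() compares vruntimes through a signed subtraction rather than a plain "<" so the ordering stays correct even if the u64 values wrap. A stand-alone demonstration (user-space C):

        #include <stdio.h>
        #include <stdint.h>

        static int before(uint64_t a, uint64_t b)
        {
                return (int64_t)(a - b) < 0;    /* entity_before()-style */
        }

        int main(void)
        {
                uint64_t a = UINT64_MAX - 5;    /* just before wraparound */
                uint64_t b = a + 10;            /* wraps around to 4 */

                /* naive compare says a > b; signed difference says a is earlier */
                printf("a < b: %d, before(a, b): %d\n", a < b, before(a, b));
                return 0;
        }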
@@ -345,7 +345,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
         softirq_vec[nr].action = action;
 }
 
-/* Tasklets */
+/*
+ * Tasklets
+ */
 struct tasklet_head
 {
         struct tasklet_struct *head;
@@ -493,6 +495,66 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+/*
+ * tasklet_hrtimer
+ */
+
+/*
+ * The trampoline is called when the hrtimer expires. If this is
+ * called from the hrtimer interrupt then we schedule the tasklet as
+ * the timer callback function expects to run in softirq context. If
+ * it's called in softirq context anyway (i.e. high resolution timers
+ * disabled) then the hrtimer callback is called right away.
+ */
+static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+{
+        struct tasklet_hrtimer *ttimer =
+                container_of(timer, struct tasklet_hrtimer, timer);
+
+        if (hrtimer_is_hres_active(timer)) {
+                tasklet_hi_schedule(&ttimer->tasklet);
+                return HRTIMER_NORESTART;
+        }
+        return ttimer->function(timer);
+}
+
+/*
+ * Helper function which calls the hrtimer callback from
+ * tasklet/softirq context
+ */
+static void __tasklet_hrtimer_trampoline(unsigned long data)
+{
+        struct tasklet_hrtimer *ttimer = (void *)data;
+        enum hrtimer_restart restart;
+
+        restart = ttimer->function(&ttimer->timer);
+        if (restart != HRTIMER_NORESTART)
+                hrtimer_restart(&ttimer->timer);
+}
+
+/**
+ * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+ * @ttimer:      tasklet_hrtimer which is initialized
+ * @function:    hrtimer callback function which gets called from softirq context
+ * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+ * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+ */
+void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+                          enum hrtimer_restart (*function)(struct hrtimer *),
+                          clockid_t which_clock, enum hrtimer_mode mode)
+{
+        hrtimer_init(&ttimer->timer, which_clock, mode);
+        ttimer->timer.function = __hrtimer_tasklet_trampoline;
+        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+                     (unsigned long)ttimer);
+        ttimer->function = function;
+}
+EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+
 /*
  * Remote softirq bits
  */
 
 DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 EXPORT_PER_CPU_SYMBOL(softirq_work_list);
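A usage sketch for the new tasklet_hrtimer infrastructure (callback and period are hypothetical; tasklet_hrtimer_start() is assumed to be the companion helper from the same series). The callback always runs in softirq context, even with high-resolution timers enabled:

        static struct tasklet_hrtimer my_timer;

        static enum hrtimer_restart my_cb(struct hrtimer *t)
        {
                /* softirq-safe work goes here */
                hrtimer_forward_now(t, ktime_set(0, 1000000));  /* re-arm, 1 ms */
                return HRTIMER_RESTART;
        }

        static void my_setup(void)
        {
                tasklet_hrtimer_init(&my_timer, my_cb,
                                     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                tasklet_hrtimer_start(&my_timer, ktime_set(0, 1000000),
                                      HRTIMER_MODE_REL);
        }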
@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
          * Check to make sure we don't switch to a non-highres capable
          * clocksource if the tick code is in oneshot mode (highres or nohz)
          */
-        if (tick_oneshot_mode_active() &&
+        if (tick_oneshot_mode_active() && ovr &&
             !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
                 printk(KERN_WARNING "%s clocksource is not HRT compatible. "
                         "Cannot switch while in HRT/NOHZ mode\n", ovr->name);
@@ -714,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
          * networking code - if the timer is re-modified
          * to be the same thing then just return:
          */
-        if (timer->expires == expires && timer_pending(timer))
+        if (timer_pending(timer) && timer->expires == expires)
                 return 1;
 
         return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
@@ -2595,6 +2595,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
         return ret;
 }
 
+static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+        if (file->f_mode & FMODE_READ)
+                seq_release(inode, file);
+        return 0;
+}
+
 static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
@@ -2724,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
-        .open = ftrace_graph_open,
-        .read = seq_read,
-        .write = ftrace_graph_write,
+        .open           = ftrace_graph_open,
+        .read           = seq_read,
+        .write          = ftrace_graph_write,
+        .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
-        int ret;
-
-        ret = seq_open(file, &stack_trace_seq_ops);
-
-        return ret;
+        return seq_open(file, &stack_trace_seq_ops);
 }
 
 static const struct file_operations stack_trace_fops = {
         .open = stack_trace_open,
         .read = seq_read,
         .llseek = seq_lseek,
+        .release = seq_release,
 };
 
 int
@@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node)
         }
 }
 
-static void reset_stat_session(struct stat_session *session)
+static void __reset_stat_session(struct stat_session *session)
 {
         struct rb_node *node = session->stat_root.rb_node;
 
@@ -83,10 +83,17 @@ static void reset_stat_session(struct stat_session *session)
         session->stat_root = RB_ROOT;
 }
 
+static void reset_stat_session(struct stat_session *session)
+{
+        mutex_lock(&session->stat_mutex);
+        __reset_stat_session(session);
+        mutex_unlock(&session->stat_mutex);
+}
+
 static void destroy_session(struct stat_session *session)
 {
         debugfs_remove(session->file);
-        reset_stat_session(session);
+        __reset_stat_session(session);
         mutex_destroy(&session->stat_mutex);
         kfree(session);
 }
@@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session)
         int i;
 
         mutex_lock(&session->stat_mutex);
-        reset_stat_session(session);
+        __reset_stat_session(session);
 
         if (!ts->stat_cmp)
                 ts->stat_cmp = dummy_cmp;
@@ -183,7 +190,7 @@ exit:
         return ret;
 
 exit_free_rbtree:
-        reset_stat_session(session);
+        __reset_stat_session(session);
         mutex_unlock(&session->stat_mutex);
         return ret;
 }
@@ -250,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = {
 static int tracing_stat_open(struct inode *inode, struct file *file)
 {
         int ret;
-
+        struct seq_file *m;
         struct stat_session *session = inode->i_private;
 
+        ret = stat_seq_init(session);
+        if (ret)
+                return ret;
+
         ret = seq_open(file, &trace_stat_seq_ops);
-        if (!ret) {
-                struct seq_file *m = file->private_data;
-                m->private = session;
-                ret = stat_seq_init(session);
+        if (ret) {
+                reset_stat_session(session);
+                return ret;
         }
 
+        m = file->private_data;
+        m->private = session;
         return ret;
 }
 
@@ -270,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
 {
         struct stat_session *session = i->i_private;
 
-        mutex_lock(&session->stat_mutex);
-        reset_stat_session(session);
-        mutex_unlock(&session->stat_mutex);
-
-        return 0;
+        reset_stat_session(session);
+        return seq_release(i, f);
 }
 
 static const struct file_operations tracing_stat_fops = {