Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (55 commits)
  workqueue: mark init_workqueues() as early_initcall()
  workqueue: explain for_each_*cwq_cpu() iterators
  fscache: fix build on !CONFIG_SYSCTL
  slow-work: kill it
  gfs2: use workqueue instead of slow-work
  drm: use workqueue instead of slow-work
  cifs: use workqueue instead of slow-work
  fscache: drop references to slow-work
  fscache: convert operation to use workqueue instead of slow-work
  fscache: convert object to use workqueue instead of slow-work
  workqueue: fix how cpu number is stored in work->data
  workqueue: fix mayday_mask handling on UP
  workqueue: fix build problem on !CONFIG_SMP
  workqueue: fix locking in retry path of maybe_create_worker()
  async: use workqueue for worker pool
  workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
  workqueue: implement unbound workqueue
  workqueue: prepare for WQ_UNBOUND implementation
  libata: take advantage of cmwq and remove concurrency limitations
  workqueue: fix worker management invocation without pending works
  ...

Fixed up conflicts in fs/cifs/* as per Tejun. Other trivial conflicts in
include/linux/workqueue.h, kernel/trace/Kconfig and kernel/workqueue.c
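This merge brings in concurrency managed workqueues (cmwq): shared per-CPU worker pools behind every workqueue, plus WQ_UNBOUND queues whose workers are not bound to a CPU, which is what lets async and the slow-work users convert over. A minimal sketch of the user-facing API as it stands after this merge (the names are made up; passing 0 for max_active requests the default limit):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
        /* May sleep for a long time; an unbound queue behaves much like
         * the old slow-work thread pool. */
}

static int example_setup(void)
{
        example_wq = alloc_workqueue("example", WQ_UNBOUND, 0);
        if (!example_wq)
                return -ENOMEM;

        INIT_WORK(&example_work, example_work_fn);
        queue_work(example_wq, &example_work);

        flush_workqueue(example_wq);
        destroy_workqueue(example_wq);
        return 0;
}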
kernel/Makefile

@@ -99,8 +99,6 @@ obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
kernel/async.c | 141
@@ -49,40 +49,33 @@ asynchronous and synchronous parts of the kernel.
 */

#include <linux/async.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS     256
#define MAX_WORK        32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
        struct list_head list;
        async_cookie_t cookie;
        async_func_ptr *func;
        void *data;
        struct list_head *running;
        struct list_head list;
        struct work_struct work;
        async_cookie_t cookie;
        async_func_ptr *func;
        void *data;
        struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;

@@ -117,27 +110,23 @@ static async_cookie_t lowest_in_progress(struct list_head *running)
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
static void async_run_entry_fn(struct work_struct *work)
{
        struct async_entry *entry =
                container_of(work, struct async_entry, work);
        unsigned long flags;
        struct async_entry *entry;
        ktime_t calltime, delta, rettime;

        /* 1) pick one task from the pending queue */

        /* 1) move self to the running queue */
        spin_lock_irqsave(&async_lock, flags);
        if (list_empty(&async_pending))
                goto out;
        entry = list_first_entry(&async_pending, struct async_entry, list);

        /* 2) move it to the running queue */
        list_move_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 3) run it (and print duration)*/
        /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
@@ -153,31 +142,25 @@ static void run_one_entry(void)
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 4) remove it from the running queue */
        /* 3) remove self from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);

        /* 5) free the entry */
        /* 4) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 6) wake up any waiters. */
        /* 5) wake up any waiters */
        wake_up(&async_done);
        return;

out:
        spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
@@ -186,7 +169,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
@@ -196,6 +179,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
                ptr(data, newcookie);
                return newcookie;
        }
        INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
        entry->running = running;
@@ -205,7 +189,10 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
        list_add_tail(&entry->list, &async_pending);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
        wake_up(&async_new);

        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);

        return newcookie;
}

@@ -312,87 +299,3 @@ void async_synchronize_cookie(async_cookie_t cookie)
        async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int ret = HZ;
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * check the list head without lock.. false positives
                 * are dealt with inside run_one_entry() while holding
                 * the lock.
                 */
                rmb();
                if (!list_empty(&async_pending))
                        run_one_entry();
                else
                        ret = schedule_timeout(HZ);

                if (ret == 0) {
                        /*
                         * we timed out, this means we as thread are redundant.
                         * we sign off and die, but we to avoid any races there
                         * is a last-straw check to see if work snuck in.
                         */
                        atomic_dec(&thread_count);
                        wmb(); /* manager must see our departure first */
                        if (list_empty(&async_pending))
                                break;
                        /*
                         * woops work came in between us timing out and us
                         * signing off; we need to stay alive and keep working.
                         */
                        atomic_inc(&thread_count);
                }
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

static int async_manager_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int tc, ec;

                set_current_state(TASK_INTERRUPTIBLE);

                tc = atomic_read(&thread_count);
                rmb();
                ec = atomic_read(&entry_count);

                while (tc < ec && tc < MAX_THREADS) {
                        if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
                                               tc))) {
                                msleep(100);
                                continue;
                        }
                        atomic_inc(&thread_count);
                        tc++;
                }

                schedule();
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

static int __init async_init(void)
{
        async_enabled =
                !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));

        WARN_ON(!async_enabled);
        return 0;
}

core_initcall(async_init);
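The diff above drops async's private thread pool and manager and runs each async_entry as a work item on system_unbound_wq; the caller-facing API is unchanged. A minimal sketch of that API (the module and callback names here are made up):

#include <linux/async.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical callback: stands in for a slow probe or init step. */
static void example_async_fn(void *data, async_cookie_t cookie)
{
        pr_info("async entry %lli running for %s\n",
                (long long)cookie, (const char *)data);
}

static int __init example_init(void)
{
        /* Each call allocates an async_entry; after this merge the entry
         * is executed by async_run_entry_fn() on system_unbound_wq. */
        async_schedule(example_async_fn, (void *)"example");

        /* Wait for everything scheduled so far before relying on it. */
        async_synchronize_full();
        return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");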
kernel/kthread.c | 164
@@ -14,6 +14,8 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
@@ -35,6 +37,7 @@ struct kthread_create_info

struct kthread {
        int should_stop;
        void *data;
        struct completion exited;
};

@@ -54,6 +57,19 @@ int kthread_should_stop(void)
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
@@ -64,6 +80,7 @@ static int kthread(void *_create)
        int ret;

        self.should_stop = 0;
        self.data = data;
        init_completion(&self.exited);
        current->vfork_done = &self.exited;

@@ -247,3 +264,150 @@ int kthreadd(void *unused)

        return 0;
}

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * the it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
                smp_wmb(); /* wmb worker-b0 paired with flush-b1 */
                work->done_seq = work->queue_seq;
                smp_mb(); /* mb worker-b1 paired with flush-b0 */
                if (atomic_read(&work->flushing))
                        wake_up_all(&work->done);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution. @task
 * must have been created with kthread_worker_create(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &worker->work_list);
                work->queue_seq++;
                if (likely(worker->task))
                        wake_up_process(worker->task);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        int seq = work->queue_seq;

        atomic_inc(&work->flushing);

        /*
         * mb flush-b0 paired with worker-b1, to make sure either
         * worker sees the above increment or we see done_seq update.
         */
        smp_mb__after_atomic_inc();

        /* A - B <= 0 tests whether B is in front of A regardless of overflow */
        wait_event(work->done, seq - work->done_seq <= 0);
        atomic_dec(&work->flushing);

        /*
         * rmb flush-b1 paired with worker-b0, to make sure our caller
         * sees every change made by work->func().
         */
        smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
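The new kthread_worker API gives code that still needs its own dedicated thread a simple work-queueing model on top of kthreads. A minimal sketch of the intended usage, assuming the DEFINE_KTHREAD_WORKER/DEFINE_KTHREAD_WORK initializers added to include/linux/kthread.h in the same series (the names here are made up):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

/* Hypothetical example: a driver with its own dedicated worker thread. */
static DEFINE_KTHREAD_WORKER(example_worker);

static void example_work_fn(struct kthread_work *work)
{
        pr_info("running in the example worker thread\n");
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_start(void)
{
        struct task_struct *thread;

        /* Attach a kthread to the worker; it loops in kthread_worker_fn(). */
        thread = kthread_run(kthread_worker_fn, &example_worker, "example_wkr");
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        /* Queue work and wait for it to finish executing. */
        queue_kthread_work(&example_worker, &example_work);
        flush_kthread_work(&example_work);

        /* Drain everything still pending, then stop the thread. */
        flush_kthread_worker(&example_worker);
        kthread_stop(thread);
        return 0;
}

Note that flush_kthread_work() waits on "seq - work->done_seq <= 0", a wraparound-safe "has B caught up with A" comparison in the same spirit as time_after().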
kernel/power/process.c

@@ -15,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>

/*
 * Timeout for stopping processes
@@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only)
        struct task_struct *g, *p;
        unsigned long end_time;
        unsigned int todo;
        bool wq_busy = false;
        struct timeval start, end;
        u64 elapsed_csecs64;
        unsigned int elapsed_csecs;
@@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only)
        do_gettimeofday(&start);

        end_time = jiffies + TIMEOUT;

        if (!sig_only)
                freeze_workqueues_begin();

        while (true) {
                todo = 0;
                read_lock(&tasklist_lock);
@@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only)
                                todo++;
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);

                if (!sig_only) {
                        wq_busy = freeze_workqueues_busy();
                        todo += wq_busy;
                }

                if (!todo || time_after(jiffies, end_time))
                        break;

@@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only)
                 */
                printk("\n");
                printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
                       "(%d tasks refusing to freeze):\n",
                       elapsed_csecs / 100, elapsed_csecs % 100, todo);
                       "(%d tasks refusing to freeze, wq_busy=%d):\n",
                       elapsed_csecs / 100, elapsed_csecs % 100,
                       todo - wq_busy, wq_busy);

                thaw_workqueues();

                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
@@ -157,6 +173,7 @@ void thaw_processes(void)
        oom_killer_enable();

        printk("Restarting tasks ... ");
        thaw_workqueues();
        thaw_tasks(true);
        thaw_tasks(false);
        schedule();
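try_to_freeze_tasks() now freezes workqueues alongside user space and counts still-busy queues in the failure report. Only queues created as freezable take part; a short sketch (the names are made up, and the flag was spelled WQ_FREEZEABLE in this kernel generation before being renamed WQ_FREEZABLE later):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_fz_wq;

static int __init example_fz_init(void)
{
        /* Work queued here is held back by freeze_workqueues_begin()
         * during suspend and released again by thaw_workqueues(); a
         * still-busy queue shows up in the wq_busy count printed above. */
        example_fz_wq = alloc_workqueue("example_fz", WQ_FREEZEABLE, 0);
        return example_fz_wq ? 0 : -ENOMEM;
}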
kernel/slow-work-debugfs.c (deleted)

@@ -1,227 +0,0 @@
/* Slow work debugging
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include "slow-work.h"

#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)

void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
{
        seq_puts(m, "Slow-work: New thread");
}

/*
 * Render the time mark field on a work item into a 5-char time with units plus
 * a space
 */
static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
{
        struct timespec now, diff;

        now = CURRENT_TIME;
        diff = timespec_sub(now, work->mark);

        if (diff.tv_sec < 0)
                seq_puts(m, " -ve ");
        else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
                seq_printf(m, "%3luns ", diff.tv_nsec);
        else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
                seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
        else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
                seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
        else if (diff.tv_sec <= 1)
                seq_puts(m, " 1s ");
        else if (diff.tv_sec < 60)
                seq_printf(m, "%4lus ", diff.tv_sec);
        else if (diff.tv_sec < 60 * 60)
                seq_printf(m, "%4lum ", diff.tv_sec / 60);
        else if (diff.tv_sec < 60 * 60 * 24)
                seq_printf(m, "%4luh ", diff.tv_sec / 3600);
        else
                seq_puts(m, "exces ");
}

/*
 * Describe a slow work item for debugfs
 */
static int slow_work_runqueue_show(struct seq_file *m, void *v)
{
        struct slow_work *work;
        struct list_head *p = v;
        unsigned long id;

        switch ((unsigned long) v) {
        case 1:
                seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
                return 0;
        case 2:
                seq_puts(m, "=== ===== ================ == ===== ==========\n");
                return 0;

        case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
                id = (unsigned long) v - 3;

                read_lock(&slow_work_execs_lock);
                work = slow_work_execs[id];
                if (work) {
                        smp_read_barrier_depends();

                        seq_printf(m, "%3lu %5d %16p %2lx ",
                                   id, slow_work_pids[id], work, work->flags);
                        slow_work_print_mark(m, work);

                        if (work->ops->desc)
                                work->ops->desc(work, m);
                        seq_putc(m, '\n');
                }
                read_unlock(&slow_work_execs_lock);
                return 0;

        default:
                work = list_entry(p, struct slow_work, link);
                seq_printf(m, "%3s - %16p %2lx ",
                           work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
                           work, work->flags);
                slow_work_print_mark(m, work);

                if (work->ops->desc)
                        work->ops->desc(work, m);
                seq_putc(m, '\n');
                return 0;
        }
}

/*
 * map the iterator to a work item
 */
static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
{
        struct list_head *p;
        unsigned long count, id;

        switch (*_pos >> ITERATOR_SHIFT) {
        case 0x0:
                if (*_pos == 0)
                        *_pos = 1;
                if (*_pos < 3)
                        return (void *)(unsigned long) *_pos;
                if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
                        for (id = *_pos - 3;
                             id < SLOW_WORK_THREAD_LIMIT;
                             id++, (*_pos)++)
                                if (slow_work_execs[id])
                                        return (void *)(unsigned long) *_pos;
                *_pos = 0x1UL << ITERATOR_SHIFT;

        case 0x1:
                count = *_pos & ITERATOR_COUNTER;
                list_for_each(p, &slow_work_queue) {
                        if (count == 0)
                                return p;
                        count--;
                }
                *_pos = 0x2UL << ITERATOR_SHIFT;

        case 0x2:
                count = *_pos & ITERATOR_COUNTER;
                list_for_each(p, &vslow_work_queue) {
                        if (count == 0)
                                return p;
                        count--;
                }
                *_pos = 0x3UL << ITERATOR_SHIFT;

        default:
                return NULL;
        }
}

/*
 * set up the iterator to start reading from the first line
 */
static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
{
        spin_lock_irq(&slow_work_queue_lock);
        return slow_work_runqueue_index(m, _pos);
}

/*
 * move to the next line
 */
static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
{
        struct list_head *p = v;
        unsigned long selector = *_pos >> ITERATOR_SHIFT;

        (*_pos)++;
        switch (selector) {
        case 0x0:
                return slow_work_runqueue_index(m, _pos);

        case 0x1:
                if (*_pos >> ITERATOR_SHIFT == 0x1) {
                        p = p->next;
                        if (p != &slow_work_queue)
                                return p;
                }
                *_pos = 0x2UL << ITERATOR_SHIFT;
                p = &vslow_work_queue;

        case 0x2:
                if (*_pos >> ITERATOR_SHIFT == 0x2) {
                        p = p->next;
                        if (p != &vslow_work_queue)
                                return p;
                }
                *_pos = 0x3UL << ITERATOR_SHIFT;

        default:
                return NULL;
        }
}

/*
 * clean up after reading
 */
static void slow_work_runqueue_stop(struct seq_file *m, void *v)
{
        spin_unlock_irq(&slow_work_queue_lock);
}

static const struct seq_operations slow_work_runqueue_ops = {
        .start = slow_work_runqueue_start,
        .stop = slow_work_runqueue_stop,
        .next = slow_work_runqueue_next,
        .show = slow_work_runqueue_show,
};

/*
 * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
 */
static int slow_work_runqueue_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slow_work_runqueue_ops);
}

const struct file_operations slow_work_runqueue_fops = {
        .owner = THIS_MODULE,
        .open = slow_work_runqueue_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
kernel/slow-work.c | 1068 (diff suppressed because it is too large)
kernel/slow-work.h (deleted)

@@ -1,72 +0,0 @@
/* Slow work private definitions
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

#define SLOW_WORK_THREAD_LIMIT 255	/* abs maximum number of slow-work threads */

/*
 * slow-work.c
 */
#ifdef CONFIG_SLOW_WORK_DEBUG
extern struct slow_work *slow_work_execs[];
extern pid_t slow_work_pids[];
extern rwlock_t slow_work_execs_lock;
#endif

extern struct list_head slow_work_queue;
extern struct list_head vslow_work_queue;
extern spinlock_t slow_work_queue_lock;

/*
 * slow-work-debugfs.c
 */
#ifdef CONFIG_SLOW_WORK_DEBUG
extern const struct file_operations slow_work_runqueue_fops;

extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
#endif

/*
 * Helper functions
 */
static inline void slow_work_set_thread_pid(int id, pid_t pid)
{
#ifdef CONFIG_SLOW_WORK_DEBUG
        slow_work_pids[id] = pid;
#endif
}

static inline void slow_work_mark_time(struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_DEBUG
        work->mark = CURRENT_TIME;
#endif
}

static inline void slow_work_begin_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_DEBUG
        slow_work_execs[id] = work;
#endif
}

static inline void slow_work_end_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_DEBUG
        write_lock(&slow_work_execs_lock);
        slow_work_execs[id] = NULL;
        write_unlock(&slow_work_execs_lock);
#endif
}
kernel/sysctl.c

@@ -50,7 +50,6 @@
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/slow-work.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/pipe_fs_i.h>
@@ -917,13 +916,6 @@ static struct ctl_table kern_table[] = {
                .proc_handler = proc_dointvec,
        },
#endif
#ifdef CONFIG_SLOW_WORK
        {
                .procname = "slow-work",
                .mode = 0555,
                .child = slow_work_sysctls,
        },
#endif
#ifdef CONFIG_PERF_EVENTS
        {
                .procname = "perf_event_paranoid",
kernel/trace/Kconfig

@@ -323,17 +323,6 @@ config STACK_TRACER

          Say N if unsure.

config WORKQUEUE_TRACER
        bool "Trace workqueues"
        select GENERIC_TRACER
        help
          The workqueue tracer provides some statistical information
          about each cpu workqueue thread such as the number of the
          works inserted and executed since their creation. It can help
          to evaluate the amount of work each of them has to perform.
          For example it can help a developer to decide whether he should
          choose a per-cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
        bool "Support for tracing block IO actions"
        depends on SYSFS
kernel/workqueue.c | 3198 (diff suppressed because it is too large)
kernel/workqueue_sched.h

@@ -4,13 +4,6 @@
 * Scheduler hooks for concurrency managed workqueue. Only to be
 * included from sched.c and workqueue.c.
 */
static inline void wq_worker_waking_up(struct task_struct *task,
                                       unsigned int cpu)
{
}

static inline struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                                     unsigned int cpu)
{
        return NULL;
}
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                       unsigned int cpu);
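The empty stubs above become real declarations: the scheduler now tells the workqueue code when a worker goes to sleep or wakes up, so that concurrency-managed worker pools can keep exactly one runnable worker per CPU. A simplified, illustrative sketch of the call pattern expected on the sched.c side, for tasks flagged PF_WQ_WORKER (not the actual scheduler code, which is more involved):

#include "workqueue_sched.h"

static void example_note_worker_waking(struct task_struct *p, unsigned int cpu)
{
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu);    /* worker becomes runnable again */
}

static void example_note_worker_sleeping(struct task_struct *prev, unsigned int cpu)
{
        struct task_struct *to_wakeup;

        if (!(prev->flags & PF_WQ_WORKER))
                return;

        /* Ask the workqueue code whether another worker on this CPU should
         * be woken to preserve the pool's concurrency level. */
        to_wakeup = wq_worker_sleeping(prev, cpu);
        if (to_wakeup)
                wake_up_process(to_wakeup);
}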