Merge 647781347a ("workqueue: Fix hung time report of worker pools") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: I559e6bbf071d1d28e4a296c6229e32d4d938764b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
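This merge touches kernel/workqueue.c in two ways, both visible in the diff below. Most hunks backport the upstream rename of per-pwq "delayed" bookkeeping to "inactive" (delayed_works becomes inactive_works, WORK_STRUCT_DELAYED becomes WORK_STRUCT_INACTIVE, and the pwq_activate_* helpers follow suit), avoiding confusion with timer-deferred delayed_work items: works on this list are held back only because the workqueue is at max_active. The final show_workqueue_state() hunk is the titular fix: a pool with no pending work now reports hung=0s instead of the stale age of its watchdog timestamp. Hedged standalone sketches of both behaviors are interleaved after the relevant hunks.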
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -211,7 +211,7 @@ struct pool_workqueue {
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
-	struct list_head	delayed_works;	/* L: delayed works */
+	struct list_head	inactive_works;	/* L: inactive works */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 
@@ -1152,7 +1152,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 	}
 }
 
-static void pwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_inactive_work(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
 
@@ -1160,16 +1160,16 @@ static void pwq_activate_delayed_work(struct work_struct *work)
 	if (list_empty(&pwq->pool->worklist))
 		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
 	pwq->nr_active++;
 }
 
-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-	struct work_struct *work = list_first_entry(&pwq->delayed_works,
+	struct work_struct *work = list_first_entry(&pwq->inactive_works,
 						    struct work_struct, entry);
 
-	pwq_activate_delayed_work(work);
+	pwq_activate_inactive_work(work);
 }
 
 /**
@@ -1192,10 +1192,10 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 	pwq->nr_in_flight[color]--;
 
 	pwq->nr_active--;
-	if (!list_empty(&pwq->delayed_works)) {
-		/* one down, submit a delayed one */
+	if (!list_empty(&pwq->inactive_works)) {
+		/* one down, submit an inactive one */
 		if (pwq->nr_active < pwq->max_active)
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
@@ -1297,14 +1297,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		debug_work_deactivate(work);
 
 		/*
-		 * A delayed work item cannot be grabbed directly because
+		 * An inactive work item cannot be grabbed directly because
 		 * it might have linked NO_COLOR work items which, if left
-		 * on the delayed_list, will confuse pwq->nr_active
+		 * on the inactive_works list, will confuse pwq->nr_active
 		 * management later on and cause stall.  Make sure the work
 		 * item is activated before grabbing.
 		 */
-		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-			pwq_activate_delayed_work(work);
+		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
+			pwq_activate_inactive_work(work);
 
 		list_del_init(&work->entry);
 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
@@ -1506,8 +1506,8 @@ retry:
 		if (list_empty(worklist))
 			pwq->pool->watchdog_ts = jiffies;
 	} else {
-		work_flags |= WORK_STRUCT_DELAYED;
-		worklist = &pwq->delayed_works;
+		work_flags |= WORK_STRUCT_INACTIVE;
+		worklist = &pwq->inactive_works;
 	}
 
 	debug_work_activate(work);
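A minimal standalone sketch (not the kernel code) of the gate renamed in the hunk above: once nr_active reaches max_active, new work is parked on inactive_works and tagged with a stand-in INACTIVE bit instead of going straight to the pool's worklist. All names and values here are hypothetical.

/* Sketch of the max_active gate in the queueing path after the rename. */
#include <stdio.h>

#define SKETCH_WORK_STRUCT_INACTIVE (1UL << 1)	/* stand-in flag bit */

int main(void)
{
	int nr_active = 4, max_active = 4;	/* pwq already at its limit */
	unsigned long work_flags = 0;
	const char *worklist;

	if (nr_active < max_active) {
		nr_active++;
		worklist = "pool->worklist";		/* runs immediately */
	} else {
		work_flags |= SKETCH_WORK_STRUCT_INACTIVE;
		worklist = "pwq->inactive_works";	/* parked until a slot frees up */
	}

	printf("queued to %s, flags=%#lx\n", worklist, work_flags);
	return 0;
}

In the real code the parked work is reactivated by pwq_activate_first_inactive() when pwq_dec_nr_in_flight() drops nr_active below max_active, as the earlier hunks show.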
@@ -2551,7 +2551,7 @@ repeat:
 			/*
 			 * The above execution of rescued work items could
 			 * have created more to rescue through
-			 * pwq_activate_first_delayed() or chained
+			 * pwq_activate_first_inactive() or chained
 			 * queueing.  Let's put @pwq back on mayday list so
 			 * that such back-to-back work items, which may be
 			 * being used to relieve memory pressure, don't
@@ -2977,7 +2977,7 @@ reflush:
 		bool drained;
 
 		raw_spin_lock_irq(&pwq->pool->lock);
-		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
 		raw_spin_unlock_irq(&pwq->pool->lock);
 
 		if (drained)
@@ -3731,7 +3731,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 * @pwq: target pool_workqueue
 *
 * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate delayed work items
+ * workqueue's saved_max_active and activate inactive work items
 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
 */
 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
@@ -3760,9 +3760,9 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 
 		pwq->max_active = wq->saved_max_active;
 
-		while (!list_empty(&pwq->delayed_works) &&
+		while (!list_empty(&pwq->inactive_works) &&
 		       pwq->nr_active < pwq->max_active) {
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 			kick = true;
 		}
 
@@ -3793,7 +3793,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 	pwq->wq = wq;
 	pwq->flush_color = -1;
 	pwq->refcnt = 1;
-	INIT_LIST_HEAD(&pwq->delayed_works);
+	INIT_LIST_HEAD(&pwq->inactive_works);
 	INIT_LIST_HEAD(&pwq->pwqs_node);
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
@@ -4380,7 +4380,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 
 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
 		return true;
-	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 		return true;
 
 	return false;
@@ -4576,7 +4576,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 	else
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
-	ret = !list_empty(&pwq->delayed_works);
+	ret = !list_empty(&pwq->inactive_works);
 	preempt_enable();
 	rcu_read_unlock();
 
@@ -4772,11 +4772,11 @@ static void show_pwq(struct pool_workqueue *pwq)
 		pr_cont("\n");
 	}
 
-	if (!list_empty(&pwq->delayed_works)) {
+	if (!list_empty(&pwq->inactive_works)) {
 		bool comma = false;
 
-		pr_info("    delayed:");
-		list_for_each_entry(work, &pwq->delayed_works, entry) {
+		pr_info("    inactive:");
+		list_for_each_entry(work, &pwq->inactive_works, entry) {
 			pr_cont_work(comma, work);
 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
 		}
@@ -4806,7 +4806,7 @@ void show_workqueue_state(void)
 		bool idle = true;
 
 		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
 				idle = false;
 				break;
 			}
@@ -4818,7 +4818,7 @@ void show_workqueue_state(void)
 
 		for_each_pwq(pwq, wq) {
 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 				show_pwq(pwq);
 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 			/*
@@ -4833,16 +4833,19 @@ void show_workqueue_state(void)
 	for_each_pool(pool, pi) {
 		struct worker *worker;
 		bool first = true;
+		unsigned long hung = 0;
 
 		raw_spin_lock_irqsave(&pool->lock, flags);
 		if (pool->nr_workers == pool->nr_idle)
 			goto next_pool;
 
+		/* How long the first pending work is waiting for a worker. */
+		if (!list_empty(&pool->worklist))
+			hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
+
 		pr_info("pool %d:", pool->id);
 		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
+		pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
 		if (pool->manager)
 			pr_cont(" manager: %d",
 				task_pid_nr(pool->manager->task));
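A minimal standalone sketch (not the kernel code) of the reporting rule introduced in the hunk above: a hung age is computed only when the pool actually has pending work; otherwise hung=0s is printed rather than the stale age of watchdog_ts. The jiffies value, watchdog timestamp, and tick rate below are hypothetical stand-ins.

/* Sketch of the fixed hung-time computation in show_workqueue_state(). */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_HZ 100UL			/* assumed ticks per second */

static unsigned long sketch_jiffies_to_msecs(unsigned long j)
{
	return j * (1000UL / SKETCH_HZ);
}

int main(void)
{
	unsigned long jiffies = 123456;		/* "now", in ticks */
	unsigned long watchdog_ts = 120456;	/* last forward progress */
	bool worklist_empty = false;		/* work is waiting for a worker */
	unsigned long hung = 0;

	/* Only report a hung age when work is actually pending; an idle
	 * pool stays at hung=0s instead of showing a bogus stale age. */
	if (!worklist_empty)
		hung = sketch_jiffies_to_msecs(jiffies - watchdog_ts) / 1000;

	printf("hung=%lus\n", hung);		/* prints hung=30s here */
	return 0;
}

Note the format change from %us to %lus in the patch: hung is an unsigned long, so the old %u specifier was also mismatched for the 64-bit division result.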
@@ -5194,7 +5197,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
- * workqueues will queue new works to their delayed_works list instead of
+ * workqueues will queue new works to their inactive_works list instead of
 * pool->worklist.
 *
 * CONTEXT: