Merge 647781347a ("workqueue: Fix hung time report of worker pools") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: I559e6bbf071d1d28e4a296c6229e32d4d938764b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman
Date:   2023-06-25 17:07:01 +00:00

 2 files changed, 37 insertions(+), 34 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h

@@ -30,7 +30,7 @@ void delayed_work_timer_fn(struct timer_list *t);

 enum {
 	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
-	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
+	WORK_STRUCT_INACTIVE_BIT= 1,	/* work item is inactive */
 	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
 	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -43,7 +43,7 @@ enum {
 	WORK_STRUCT_COLOR_BITS	= 4,

 	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
-	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
+	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
 	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
 	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
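The header change above is purely mechanical: each WORK_STRUCT_* state is one bit in the work item's data word, and DELAYED is renamed to INACTIVE ("queued but throttled by max_active") to avoid confusion with the separate delayed_work mechanism, which means timer-deferred work. A minimal userspace sketch of how such state bits behave (a simplified model, not the kernel's work_struct):

#include <stdio.h>

/* Simplified model of the WORK_STRUCT_* bits: each state is one bit
 * in a flags word, mirroring the enum in the hunk above. */
enum {
	WORK_STRUCT_PENDING_BIT  = 0,
	WORK_STRUCT_INACTIVE_BIT = 1,

	WORK_STRUCT_PENDING  = 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
};

int main(void)
{
	unsigned long data = 0;

	/* Queued while the pwq is saturated: pending and inactive. */
	data |= WORK_STRUCT_PENDING | WORK_STRUCT_INACTIVE;
	if (data & WORK_STRUCT_INACTIVE)
		puts("queued but not yet runnable");

	/* Activation clears only the INACTIVE bit. */
	data &= ~WORK_STRUCT_INACTIVE;
	printf("pending=%d inactive=%d\n",
	       !!(data & WORK_STRUCT_PENDING),
	       !!(data & WORK_STRUCT_INACTIVE));
	return 0;
}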

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c

@@ -211,7 +211,7 @@ struct pool_workqueue {
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
-	struct list_head	delayed_works;	/* L: delayed works */
+	struct list_head	inactive_works;	/* L: inactive works */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
@@ -1152,7 +1152,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 	}
 }

-static void pwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_inactive_work(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);

@@ -1160,16 +1160,16 @@ static void pwq_activate_delayed_work(struct work_struct *work)
 	if (list_empty(&pwq->pool->worklist))
 		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
 	pwq->nr_active++;
 }

-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-	struct work_struct *work = list_first_entry(&pwq->delayed_works,
+	struct work_struct *work = list_first_entry(&pwq->inactive_works,
 						    struct work_struct, entry);

-	pwq_activate_delayed_work(work);
+	pwq_activate_inactive_work(work);
 }

 /**
@@ -1192,10 +1192,10 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 	pwq->nr_in_flight[color]--;

 	pwq->nr_active--;
-	if (!list_empty(&pwq->delayed_works)) {
-		/* one down, submit a delayed one */
+	if (!list_empty(&pwq->inactive_works)) {
+		/* one down, submit an inactive one */
 		if (pwq->nr_active < pwq->max_active)
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 	}

 	/* is flush in progress and are we at the flushing tip? */
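Taken together, the hunks above are the throttle this rename is about: pwq->max_active caps concurrent execution, overflow items wait on inactive_works with WORK_STRUCT_INACTIVE set, and each retiring item promotes at most one waiter. A toy userspace model of that flow (hypothetical names, not the kernel's code):

#include <stdio.h>

#define MAX_ACTIVE 2

static int nr_active, nr_inactive;

/* queue_item(): under the cap, work goes straight to the pool's
 * worklist; over it, the item parks on the inactive list. */
static void queue_item(void)
{
	if (nr_active < MAX_ACTIVE)
		nr_active++;
	else
		nr_inactive++;
}

/* complete_item(): mirrors pwq_dec_nr_in_flight() -- one down,
 * submit an inactive one. */
static void complete_item(void)
{
	nr_active--;
	if (nr_inactive > 0 && nr_active < MAX_ACTIVE) {
		nr_inactive--;
		nr_active++;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		queue_item();
	printf("active=%d inactive=%d\n", nr_active, nr_inactive); /* 2 2 */
	complete_item();
	printf("active=%d inactive=%d\n", nr_active, nr_inactive); /* 2 1 */
	return 0;
}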
@@ -1297,14 +1297,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		debug_work_deactivate(work);

 		/*
-		 * A delayed work item cannot be grabbed directly because
+		 * An inactive work item cannot be grabbed directly because
 		 * it might have linked NO_COLOR work items which, if left
-		 * on the delayed_list, will confuse pwq->nr_active
+		 * on the inactive_works list, will confuse pwq->nr_active
 		 * management later on and cause stall.  Make sure the work
 		 * item is activated before grabbing.
 		 */
-		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-			pwq_activate_delayed_work(work);
+		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
+			pwq_activate_inactive_work(work);

 		list_del_init(&work->entry);
 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
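The comment in try_to_grab_pending() carries the subtle part: cancellation tears an item down through pwq_dec_nr_in_flight(), which assumes the item was counted in nr_active, so an inactive item must be promoted first. A loose sketch of the accounting hazard (simplified; it leaves out the linked NO_COLOR items the comment is really about):

#include <stdio.h>

static int nr_active, nr_inactive;

/* grab(): models try_to_grab_pending() followed by
 * pwq_dec_nr_in_flight(); activate_first decides whether an inactive
 * item is promoted before being torn down. */
static void grab(int inactive, int activate_first)
{
	if (inactive && activate_first) {
		nr_inactive--;	/* pwq_activate_inactive_work() */
		nr_active++;
	}
	nr_active--;		/* pwq_dec_nr_in_flight() */
}

int main(void)
{
	nr_inactive = 1;
	grab(1, 0);
	printf("skipping activation: nr_active=%d (corrupt)\n", nr_active);

	nr_active = 0;
	nr_inactive = 1;
	grab(1, 1);
	printf("activating first:   nr_active=%d\n", nr_active);
	return 0;
}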
@@ -1506,8 +1506,8 @@ retry:
 		if (list_empty(worklist))
 			pwq->pool->watchdog_ts = jiffies;
 	} else {
-		work_flags |= WORK_STRUCT_DELAYED;
-		worklist = &pwq->delayed_works;
+		work_flags |= WORK_STRUCT_INACTIVE;
+		worklist = &pwq->inactive_works;
 	}

 	debug_work_activate(work);
@@ -2551,7 +2551,7 @@ repeat:
 			/*
 			 * The above execution of rescued work items could
 			 * have created more to rescue through
-			 * pwq_activate_first_delayed() or chained
+			 * pwq_activate_first_inactive() or chained
 			 * queueing.  Let's put @pwq back on mayday list so
 			 * that such back-to-back work items, which may be
 			 * being used to relieve memory pressure, don't
@@ -2977,7 +2977,7 @@ reflush:
 		bool drained;

 		raw_spin_lock_irq(&pwq->pool->lock);
-		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
 		raw_spin_unlock_irq(&pwq->pool->lock);

 		if (drained)
@@ -3731,7 +3731,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
  * @pwq: target pool_workqueue
  *
  * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate delayed work items
+ * workqueue's saved_max_active and activate inactive work items
  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
  */
 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
@@ -3760,9 +3760,9 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)

 		pwq->max_active = wq->saved_max_active;

-		while (!list_empty(&pwq->delayed_works) &&
+		while (!list_empty(&pwq->inactive_works) &&
 		       pwq->nr_active < pwq->max_active) {
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 			kick = true;
 		}
@@ -3793,7 +3793,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 	pwq->wq = wq;
 	pwq->flush_color = -1;
 	pwq->refcnt = 1;
-	INIT_LIST_HEAD(&pwq->delayed_works);
+	INIT_LIST_HEAD(&pwq->inactive_works);
 	INIT_LIST_HEAD(&pwq->pwqs_node);
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
@@ -4380,7 +4380,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
 		return true;

-	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 		return true;

 	return false;
@@ -4576,7 +4576,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 	else
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

-	ret = !list_empty(&pwq->delayed_works);
+	ret = !list_empty(&pwq->inactive_works);
 	preempt_enable();
 	rcu_read_unlock();

@@ -4772,11 +4772,11 @@ static void show_pwq(struct pool_workqueue *pwq)
 			pr_cont("\n");
 		}
 	}

-	if (!list_empty(&pwq->delayed_works)) {
+	if (!list_empty(&pwq->inactive_works)) {
 		bool comma = false;

-		pr_info("    delayed:");
-		list_for_each_entry(work, &pwq->delayed_works, entry) {
+		pr_info("    inactive:");
+		list_for_each_entry(work, &pwq->inactive_works, entry) {
 			pr_cont_work(comma, work);
 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
 		}
@@ -4806,7 +4806,7 @@ void show_workqueue_state(void)
 		bool idle = true;

 		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
 				idle = false;
 				break;
 			}
@@ -4818,7 +4818,7 @@ void show_workqueue_state(void)
 		for_each_pwq(pwq, wq) {
 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 				show_pwq(pwq);
 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);

 			/*
@@ -4833,16 +4833,19 @@ void show_workqueue_state(void)
 	for_each_pool(pool, pi) {
 		struct worker *worker;
 		bool first = true;
+		unsigned long hung = 0;

 		raw_spin_lock_irqsave(&pool->lock, flags);
 		if (pool->nr_workers == pool->nr_idle)
 			goto next_pool;

+		/* How long the first pending work is waiting for a worker. */
+		if (!list_empty(&pool->worklist))
+			hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
+
 		pr_info("pool %d:", pool->id);
 		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
+		pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
 		if (pool->manager)
 			pr_cont(" manager: %d",
 				task_pid_nr(pool->manager->task));
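This hunk is the fix named in the merge subject. pool->watchdog_ts only means something while pool->worklist is non-empty (the queueing hunks above refresh it exactly when work lands on an empty list), yet the old code computed the hang time from it unconditionally, printing a stale value for pools with nothing pending. Now hung stays 0 unless work is actually waiting, and the format moves from %us to %lus to match the new unsigned long variable. A sketch of the before/after arithmetic (userspace model, times in milliseconds):

#include <stdbool.h>
#include <stdio.h>

/* hung_seconds(): compute the hang time only when the pool really has
 * pending work, as the fixed kernel code does; otherwise report 0. */
static unsigned long hung_seconds(unsigned long now_ms,
				  unsigned long watchdog_ts_ms,
				  bool worklist_empty)
{
	unsigned long hung = 0;

	if (!worklist_empty)
		hung = (now_ms - watchdog_ts_ms) / 1000;
	return hung;
}

int main(void)
{
	/* Work waiting for 30s: both old and new code report 30s. */
	printf("busy pool: hung=%lus\n", hung_seconds(60000, 30000, false));
	/* Empty worklist, stale 30s-old timestamp: the old code still
	 * reported 30s, the fixed code reports 0s. */
	printf("idle pool: hung=%lus\n", hung_seconds(60000, 30000, true));
	return 0;
}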
@@ -5194,7 +5197,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
  * freeze_workqueues_begin - begin freezing workqueues
  *
  * Start freezing workqueues.  After this function returns, all freezable
- * workqueues will queue new works to their delayed_works list instead of
+ * workqueues will queue new works to their inactive_works list instead of
  * pool->worklist.
  *
  * CONTEXT:
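One more piece of context for this last doc fix: freezing is implemented by pwq_adjust_max_active() clamping max_active to zero, so every newly queued item lands on inactive_works until thaw restores saved_max_active and the while loop from the earlier hunk drains the backlog. A toy freeze/thaw model in the same style as the sketches above (hypothetical, userspace):

#include <stdio.h>

static int max_active = 2, saved_max_active = 2;
static int nr_active, nr_inactive;

static void queue_item(void)
{
	if (nr_active < max_active)
		nr_active++;
	else
		nr_inactive++;	/* frozen or saturated: park it */
}

/* freeze()/thaw(): mirror the two sides of pwq_adjust_max_active(). */
static void freeze(void)
{
	max_active = 0;
}

static void thaw(void)
{
	max_active = saved_max_active;
	while (nr_inactive > 0 && nr_active < max_active) {
		nr_inactive--;	/* pwq_activate_first_inactive() */
		nr_active++;
	}
}

int main(void)
{
	freeze();
	queue_item();
	queue_item();
	printf("frozen: active=%d inactive=%d\n", nr_active, nr_inactive);
	thaw();
	printf("thawed: active=%d inactive=%d\n", nr_active, nr_inactive);
	return 0;
}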