trim task_work: get rid of hlist
layout based on Oleg's suggestion; single-linked list, task->task_works
points to the last element, forward pointer from said last element points
to head.  I'd still prefer much more regular scheme with two pointers
in task_work, but...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
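For orientation, here is a minimal user-space sketch of the layout this patch adopts (the struct work / work_add / work_run names and the absence of locking are illustrative assumptions, not kernel code): the single pointer corresponding to task->task_works addresses the last queued element, and that element's forward pointer points back at the head, so one pointer gives O(1) append at the tail and O(1) access to the head for FIFO processing.

#include <stdio.h>

struct work {
        struct work *next;
        void (*func)(struct work *);
};

/* Analogous to task->task_works: points at the LAST queued element;
 * that element's ->next points back at the first, closing the cycle. */
static struct work *works;

/* cf. task_work_add(): append at the tail in O(1). */
static void work_add(struct work *w)
{
        struct work *last = works;

        w->next = last ? last->next : w;  /* new element points at the head */
        if (last)
                last->next = w;
        works = w;                        /* new element becomes the tail */
}

/* cf. task_work_run(): detach the whole list and run it in FIFO order. */
static void work_run(void)
{
        struct work *p = works, *q;

        works = NULL;
        if (!p)
                return;
        q = p->next;     /* head of the queue */
        p->next = NULL;  /* cut the cycle at the tail */
        while (q) {
                p = q->next;
                q->func(q);
                q = p;
        }
}

static void hello(struct work *w)
{
        printf("running work at %p\n", (void *)w);
}

int main(void)
{
        struct work a = { .func = hello }, b = { .func = hello };

        work_add(&a);
        work_add(&b);
        work_run();      /* prints a's address first, then b's: FIFO */
        return 0;
}

The same cut-the-cycle trick appears in task_work_run() in the diff below: grab the tail pointer under the lock, clear the tail's ->next, and the cycle becomes a plain NULL-terminated list that is walked in queueing order.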
@@ -1405,7 +1405,7 @@ struct task_struct {
         int (*notifier)(void *priv);
         void *notifier_data;
         sigset_t *notifier_mask;
-        struct hlist_head task_works;
+        void *task_works;
 
         struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
@@ -8,7 +8,7 @@ struct task_work;
 typedef void (*task_work_func_t)(struct task_work *);
 
 struct task_work {
-        struct hlist_node hlist;
+        struct task_work *next;
         task_work_func_t func;
 };
 
@@ -24,7 +24,7 @@ void task_work_run(void);
 
 static inline void exit_task_work(struct task_struct *task)
 {
-        if (unlikely(!hlist_empty(&task->task_works)))
+        if (unlikely(task->task_works))
                 task_work_run();
 }
 
@@ -192,7 +192,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
          * hlist_add_head(task->task_works);
          */
         smp_mb__after_clear_bit();
-        if (unlikely(!hlist_empty(&current->task_works)))
+        if (unlikely(current->task_works))
                 task_work_run();
 }
 
@@ -1415,7 +1415,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
          */
         p->group_leader = p;
         INIT_LIST_HEAD(&p->thread_group);
-        INIT_HLIST_HEAD(&p->task_works);
+        p->task_works = NULL;
 
         /* Now that the task is set up, run cgroup callbacks if
          * necessary. We need to run them before the task is visible
@@ -19,7 +19,12 @@ task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
          */
         raw_spin_lock_irqsave(&task->pi_lock, flags);
         if (likely(!(task->flags & PF_EXITING))) {
-                hlist_add_head(&twork->hlist, &task->task_works);
+                struct task_work *last = task->task_works;
+                struct task_work *first = last ? last->next : twork;
+                twork->next = first;
+                if (last)
+                        last->next = twork;
+                task->task_works = twork;
                 err = 0;
         }
         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -34,51 +39,48 @@ struct task_work *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
         unsigned long flags;
-        struct task_work *twork;
-        struct hlist_node *pos;
+        struct task_work *last, *res = NULL;
 
         raw_spin_lock_irqsave(&task->pi_lock, flags);
-        hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
-                if (twork->func == func) {
-                        hlist_del(&twork->hlist);
-                        goto found;
+        last = task->task_works;
+        if (last) {
+                struct task_work *q = last, *p = q->next;
+                while (1) {
+                        if (p->func == func) {
+                                q->next = p->next;
+                                if (p == last)
+                                        task->task_works = q == p ? NULL : q;
+                                res = p;
+                                break;
+                        }
+                        if (p == last)
+                                break;
+                        q = p;
+                        p = q->next;
                 }
         }
-        twork = NULL;
- found:
         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-        return twork;
+        return res;
 }
 
 void task_work_run(void)
 {
         struct task_struct *task = current;
-        struct hlist_head task_works;
-        struct hlist_node *pos;
+        struct task_work *p, *q;
 
         raw_spin_lock_irq(&task->pi_lock);
-        hlist_move_list(&task->task_works, &task_works);
+        p = task->task_works;
+        task->task_works = NULL;
         raw_spin_unlock_irq(&task->pi_lock);
 
-        if (unlikely(hlist_empty(&task_works)))
+        if (unlikely(!p))
                 return;
-        /*
-         * We use hlist to save the space in task_struct, but we want fifo.
-         * Find the last entry, the list should be short, then process them
-         * in reverse order.
-         */
-        for (pos = task_works.first; pos->next; pos = pos->next)
-                ;
 
-        for (;;) {
-                struct hlist_node **pprev = pos->pprev;
-                struct task_work *twork = container_of(pos, struct task_work,
-                                                       hlist);
-                twork->func(twork);
-
-                if (pprev == &task_works.first)
-                        break;
-                pos = container_of(pprev, struct hlist_node, next);
+        q = p->next; /* head */
+        p->next = NULL; /* cut it */
+        while (q) {
+                p = q->next;
+                q->func(q);
+                q = p;
         }
 }