task_work.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work
 * item is not queued. It's up to the caller to arrange for an alternative
 * mechanism in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if succeeded, or -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
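
/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * embeds the callback_head in its own object, initializes it with
 * init_task_work(), and queues it. my_ctx, my_func and queue_example
 * are hypothetical names, not kernel APIs; kzalloc()/kfree() would
 * need <linux/slab.h>.
 */
#if 0
struct my_ctx {
	struct callback_head twork;	/* recovered via container_of() */
	int payload;
};

static void my_func(struct callback_head *cb)
{
	struct my_ctx *ctx = container_of(cb, struct my_ctx, twork);

	/* runs in the context of the targeted task */
	pr_info("payload=%d\n", ctx->payload);
	kfree(ctx);
}

static int queue_example(struct task_struct *task)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->payload = 42;
	init_task_work(&ctx->twork, my_func);
	/* -ESRCH means @task already ran its final task_work_run() */
	if (task_work_add(task, &ctx->twork, TWA_RESUME)) {
		kfree(ctx);
		return -ESRCH;
	}
	return 0;
}
#endif
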
/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to @match
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work and we will find it again, or we
	 * raced with task_work_run() and *pprev is NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}
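
/*
 * Cancellation sketch (illustrative, not part of this file): undo the
 * hypothetical queue_example() above. A non-NULL return means the work
 * was dequeued before it ran, so the caller owns the embedding object
 * again and must reclaim it itself.
 */
#if 0
static void cancel_example(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_func);

	if (cb)
		kfree(container_of(cb, struct my_ctx, twork));
	/* NULL: never queued, already run, or currently running */
}
#endif
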
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work, since cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next
		 * list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
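
/*
 * Ordering sketch (illustrative, not part of this file): the list is
 * LIFO, so two works queued A then B against the same task run B first
 * when the core kernel drains them via task_work_run() (e.g. from
 * resume_user_mode_work() or the exit path). note, work_a, work_b and
 * lifo_example are hypothetical names.
 */
#if 0
static void note(struct callback_head *cb)
{
	pr_info("%ps ran\n", cb->func);
}

static struct callback_head work_a, work_b;

static void lifo_example(void)
{
	init_task_work(&work_a, note);
	init_task_work(&work_b, note);

	/* A queued first, B second: task_work_run() pops B, then A */
	if (task_work_add(current, &work_a, TWA_RESUME))
		return;				/* current is exiting */
	if (task_work_add(current, &work_b, TWA_RESUME))
		task_work_cancel(current, note);	/* unwind A */
}
#endif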