swait.c

// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

/*
 * Wake up all waiters. This is an interface which is solely exposed for
 * completions and not for general usage.
 *
 * It is intentionally different from swake_up_all() to allow usage from
 * hard interrupt context and interrupt disabled regions.
 */
void swake_up_all_locked(struct swait_queue_head *q)
{
	while (!list_empty(&q->task_list))
		swake_up_locked(q);
}

void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
		 * must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
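
/*
 * Illustrative sketch (not part of swait.c): roughly the wait loop that the
 * interruptible swait_event*() macros in <linux/swait.h> expand to, built
 * only from the functions defined in this file plus DECLARE_SWAITQUEUE() and
 * schedule(). The function name and parameters are placeholders; it assumes
 * <linux/swait.h> and <linux/sched.h>.
 */
static int example_wait_interruptible(struct swait_queue_head *q, bool *cond)
{
	DECLARE_SWAITQUEUE(wait);
	long ret;

	for (;;) {
		/* Queue exclusively and set TASK_INTERRUPTIBLE, or get
		 * -ERESTARTSYS (and be dequeued) if a signal is pending. */
		ret = prepare_to_swait_event(q, &wait, TASK_INTERRUPTIBLE);
		if (READ_ONCE(*cond)) {
			ret = 0;
			break;
		}
		if (ret)		/* -ERESTARTSYS: signal pending */
			break;
		schedule();
	}
	/* Safe even when already dequeued: finish_swait() checks the list. */
	finish_swait(q, &wait);
	return ret;
}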

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
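
/*
 * Illustrative sketch (not part of swait.c): the waker side. swake_up_one()
 * takes q->lock with irqsave, so it may be called from hardirq context, e.g.
 * an interrupt handler paired with the wait loop sketched above. The device
 * structure and functions below are hypothetical; init_swait_queue_head() is
 * the <linux/swait.h> wrapper around __init_swait_queue_head().
 */
struct example_dev {
	struct swait_queue_head	wq;
	bool			data_ready;
};

static void example_dev_init(struct example_dev *dev)
{
	init_swait_queue_head(&dev->wq);
	dev->data_ready = false;
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *dev = data;

	WRITE_ONCE(dev->data_ready, true);
	swake_up_one(&dev->wq);		/* wakes at most one exclusive waiter */
	return IRQ_HANDLED;
}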