  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * <linux/swait.h> (simple wait queues ) implementation:
  4. */
  5. #include "sched.h"
  6. void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
  7. struct lock_class_key *key)
  8. {
  9. raw_spin_lock_init(&q->lock);
  10. lockdep_set_class_and_name(&q->lock, key, name);
  11. INIT_LIST_HEAD(&q->task_list);
  12. }
  13. EXPORT_SYMBOL(__init_swait_queue_head);
/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
/*
 * Wake the oldest waiter on @q, if any.  Caller must hold q->lock (see
 * swake_up(), which wraps this in raw_spin_lock_irqsave/irqrestore).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	/* No waiters queued; nothing to do. */
	if (list_empty(&q->task_list))
		return;

	/* Simple wait queues wake exactly one task: the head of the list. */
	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	/*
	 * Dequeue after waking; list_del_init() leaves the entry
	 * self-linked so finish_swait()'s emptiness checks see it as
	 * removed.
	 */
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);
  30. void swake_up(struct swait_queue_head *q)
  31. {
  32. unsigned long flags;
  33. raw_spin_lock_irqsave(&q->lock, flags);
  34. swake_up_locked(q);
  35. raw_spin_unlock_irqrestore(&q->lock, flags);
  36. }
  37. EXPORT_SYMBOL(swake_up);
/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	/*
	 * Detach the current set of waiters onto a private list so that
	 * tasks queueing after this point don't extend this loop forever.
	 */
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		/*
		 * Must run under q->lock: a woken task may concurrently
		 * remove its own entry from @tmp via finish_swait(), which
		 * takes q->lock before list_del_init().
		 */
		list_del_init(&curr->task_list);

		/* Last entry handled: skip the pointless unlock/relock. */
		if (list_empty(&tmp))
			break;

		/*
		 * Briefly release the lock between wakeups to bound lock
		 * hold time (and IRQ-off time), per the comment above.
		 */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);
  60. void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
  61. {
  62. wait->task = current;
  63. if (list_empty(&wait->task_list))
  64. list_add(&wait->task_list, &q->task_list);
  65. }
  66. void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
  67. {
  68. unsigned long flags;
  69. raw_spin_lock_irqsave(&q->lock, flags);
  70. __prepare_to_swait(q, wait);
  71. set_current_state(state);
  72. raw_spin_unlock_irqrestore(&q->lock, flags);
  73. }
  74. EXPORT_SYMBOL(prepare_to_swait);
  75. long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
  76. {
  77. if (signal_pending_state(state, current))
  78. return -ERESTARTSYS;
  79. prepare_to_swait(q, wait, state);
  80. return 0;
  81. }
  82. EXPORT_SYMBOL(prepare_to_swait_event);
  83. void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
  84. {
  85. __set_current_state(TASK_RUNNING);
  86. if (!list_empty(&wait->task_list))
  87. list_del_init(&wait->task_list);
  88. }
/*
 * End a wait started with prepare_to_swait(): restore TASK_RUNNING and
 * dequeue @wait from @q if a waker has not already done so.
 */
void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	/*
	 * Lockless fast path: if a waker already ran list_del_init() on our
	 * entry we can skip taking q->lock entirely.  list_empty_careful()
	 * tolerates racing with that concurrent removal; if it reports
	 * non-empty we re-check and unlink under the lock.
	 */
	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);