closure.c

/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"
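
/*
 * Common tail of closure_put() and closure_sub(): when the remaining count
 * drops to zero, either requeue the closure to run cl->fn, or, if
 * CLOSURE_DESTRUCTOR is set (or there is no fn at all), treat cl->fn as the
 * destructor, run it, and drop the ref held on the parent closure.
 */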
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
        int r = flags & CLOSURE_REMAINING_MASK;

        BUG_ON(flags & CLOSURE_GUARD_MASK);
        BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

        if (!r) {
                if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
                        atomic_set(&cl->remaining,
                                   CLOSURE_REMAINING_INITIALIZER);
                        closure_queue(cl);
                } else {
                        struct closure *parent = cl->parent;
                        closure_fn *destructor = cl->fn;

                        closure_debug_destroy(cl);

                        if (destructor)
                                destructor(cl);

                        if (parent)
                                closure_put(parent);
                }
        }
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
        closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/**
 * closure_put - decrement a closure's refcount
 * @cl: closure to put
 */
void closure_put(struct closure *cl)
{
        closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
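
/*
 * Sketch of a typical get/put pairing (hypothetical caller; closure_get()
 * is declared in closure.h, and submit_async_op() is made up for the
 * example):
 *
 *      closure_get(cl);                take a ref for an in-flight op
 *      submit_async_op(..., cl);       the op's completion calls closure_put(cl)
 */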

/**
 * __closure_wake_up - wake up all closures on a wait list, without memory barrier
 * @wait_list: the wait list to wake up
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
        struct llist_node *list;
        struct closure *cl, *t;
        struct llist_node *reverse = NULL;

        list = llist_del_all(&wait_list->list);

        /* We first reverse the list to preserve FIFO ordering and fairness */
        reverse = llist_reverse_order(list);

        /* Then do the wakeups */
        llist_for_each_entry_safe(cl, t, reverse, list) {
                closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 *            closure_wake_up() is called on @waitlist
 * @cl: closure to add to @waitlist
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
        if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
                return false;

        closure_set_waiting(cl, _RET_IP_);
        atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
        llist_add(&cl->list, &waitlist->list);

        return true;
}
EXPORT_SYMBOL(closure_wait);
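
/*
 * The "+ 1" above is a ref held on behalf of the waitlist;
 * __closure_wake_up() drops it, together with the CLOSURE_WAITING flag,
 * in a single closure_sub(cl, CLOSURE_WAITING + 1).
 */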

struct closure_syncer {
        struct task_struct      *task;
        int                     done;
};
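
/*
 * Runs as the closure's continuation once all other refs have been dropped;
 * marks the syncer done and wakes the task sleeping in __closure_sync().
 */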
static void closure_sync_fn(struct closure *cl)
{
        cl->s->done = 1;
        wake_up_process(cl->s->task);
}

void __sched __closure_sync(struct closure *cl)
{
        struct closure_syncer s = { .task = current };

        cl->s = &s;
        continue_at(cl, closure_sync_fn, NULL);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (s.done)
                        break;
                schedule();
        }

        __set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);
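
/*
 * Usage sketch for the above (hypothetical caller; closure_init_stack()
 * and the closure_sync() wrapper are assumed from closure.h, and
 * start_async_work() is made up for the example):
 *
 *      struct closure cl;
 *
 *      closure_init_stack(&cl);
 *      start_async_work(&cl);  each unit of work does closure_get()/closure_put()
 *      closure_sync(&cl);      sleeps here until every ref has been dropped
 */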

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
        unsigned long flags;

        BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
        cl->magic = CLOSURE_MAGIC_ALIVE;

        spin_lock_irqsave(&closure_list_lock, flags);
        list_add(&cl->all, &closure_list);
        spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
        unsigned long flags;

        BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
        cl->magic = CLOSURE_MAGIC_DEAD;

        spin_lock_irqsave(&closure_list_lock, flags);
        list_del(&cl->all);
        spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *debug;
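
/*
 * Flag key for the dump below: Q = queued as a work item, R = running;
 * a "W <ip>" line names the call site recorded by closure_wait() that a
 * waiter is blocked on.
 */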
static int debug_seq_show(struct seq_file *f, void *data)
{
        struct closure *cl;

        spin_lock_irq(&closure_list_lock);

        list_for_each_entry(cl, &closure_list, all) {
                int r = atomic_read(&cl->remaining);

                seq_printf(f, "%p: %pF -> %pf p %p r %i ",
                           cl, (void *) cl->ip, cl->fn, cl->parent,
                           r & CLOSURE_REMAINING_MASK);

                seq_printf(f, "%s%s\n",
                           test_bit(WORK_STRUCT_PENDING_BIT,
                                    work_data_bits(&cl->work)) ? "Q" : "",
                           r & CLOSURE_RUNNING ? "R" : "");

                if (r & CLOSURE_WAITING)
                        seq_printf(f, " W %pF\n",
                                   (void *) cl->waiting_on);

                seq_printf(f, "\n");
        }

        spin_unlock_irq(&closure_list_lock);
        return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
        .owner          = THIS_MODULE,
        .open           = debug_seq_open,
        .read           = seq_read,
        .release        = single_release
};

void __init closure_debug_init(void)
{
        debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}

#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");