/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include "closure.h"

/*
 * CL_FIELD() generates one switch case per closure subtype, returning a
 * pointer to the requested field of the enclosing struct.
 */
#define CL_FIELD(type, field)					\
	case TYPE_ ## type:					\
	return &container_of(cl, struct type, cl)->field

/* Return the embedded waitlist, if this closure type has one */
static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
	switch (cl->type) {
	CL_FIELD(closure_with_waitlist, wait);
	default:
		return NULL;
	}
}
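
/*
 * For reference, CL_FIELD(closure_with_waitlist, wait) above expands to
 * roughly:
 *
 *	case TYPE_closure_with_waitlist:
 *		return &container_of(cl, struct closure_with_waitlist,
 *				     cl)->wait;
 *
 * i.e. given the embedded struct closure, recover the enclosing subtype
 * and hand back its wait field.
 */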

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* Refs are gone but another fn is set: requeue */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			struct closure_waitlist *wait = closure_waitlist(cl);
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			smp_mb();
			/* -1 marks the closure unlocked/free for reuse */
			atomic_set(&cl->remaining, -1);

			if (wait)
				closure_wake_up(wait);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
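
/*
 * Lifecycle implied above, as a sketch (the exact flag values live in
 * closure.h): cl->remaining holds a refcount in its low bits plus state
 * flags. A count above zero means refs are outstanding; hitting zero
 * with a fn set requeues the closure, hitting zero without one destroys
 * it; -1 marks it free for closure_trylock().
 */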

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
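
/*
 * Usage sketch (the example_* names are hypothetical, not part of this
 * file): take one ref per async sub-operation, drop it from the
 * completion callback, and the final put fires whatever comes next.
 *
 *	static void example_op_done(void *data)
 *	{
 *		struct closure *cl = data;
 *
 *		closure_put(cl);	// may requeue or destroy cl
 *	}
 *
 *	static void example_submit(struct closure *cl)
 *	{
 *		closure_get(cl);	// ref held by the op in flight
 *		example_start_async_op(example_op_done, cl);
 *	}
 */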

/* Record where the closure blocked, for the debugfs output below */
static void set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}

void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */

	while (list) {
		struct llist_node *t = list;
		list = llist_next(list);

		t->next = reverse;
		reverse = t;
	}

	/* Then do the wakeups */

	while (reverse) {
		cl = container_of(reverse, struct closure, list);
		reverse = llist_next(reverse);

		set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/*
 * Returns false (without queueing anything) if the closure was already
 * on a waitlist - a closure can only wait on one thing at a time.
 */
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &list->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
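
/*
 * Sketch of the wait/wake pairing (example_wq is hypothetical;
 * closure_wake_up() is the inline wrapper from closure.h):
 *
 *	// waiter - parks cl and takes the CLOSURE_WAITING ref:
 *	if (!closure_wait(&example_wq, cl))
 *		return;		// already parked on some other list
 *
 *	// waker - wakes everything parked on the list; each closure has
 *	// its CLOSURE_WAITING ref dropped again via closure_sub():
 *	closure_wake_up(&example_wq);
 */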

/**
 * closure_sync() - sleep until a closure has nothing left to wait on
 * @cl: closure to sync on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the
 * closure owns the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	__closure_end_sleep(cl);
}
EXPORT_SYMBOL(closure_sync);
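
/*
 * Typical on-stack usage, as a sketch (example_submit() is
 * hypothetical):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	example_submit(&cl);	// takes a ref per async op it starts
 *	closure_sync(&cl);	// sleep until those refs are dropped
 */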

/**
 * closure_trylock() - try to acquire the closure, without waiting
 * @cl: closure to lock
 * @parent: parent closure, may be NULL; a ref is taken on it, dropped
 * again when @cl is released
 *
 * Returns true if the closure was successfully locked.
 */
bool closure_trylock(struct closure *cl, struct closure *parent)
{
	if (atomic_cmpxchg(&cl->remaining, -1,
			   CLOSURE_REMAINING_INITIALIZER) != -1)
		return false;

	smp_mb();

	cl->parent = parent;
	if (parent)
		closure_get(parent);

	closure_set_ret_ip(cl);
	closure_debug_create(cl);

	return true;
}
EXPORT_SYMBOL(closure_trylock);
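
/*
 * Sketch of the lock/unlock pattern, assuming the closure_return()
 * helper from closure.h (example_state and its fields are hypothetical):
 *
 *	if (!closure_trylock(&example_state->cl, parent))
 *		return;			// someone else holds it
 *
 *	// ... exclusive section ...
 *
 *	closure_return(&example_state->cl);	// drops back to -1
 */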

void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list)
{
	struct closure wait;
	closure_init_stack(&wait);

	while (1) {
		if (closure_trylock(cl, parent))
			return;

		closure_wait_event(wait_list, &wait,
				   atomic_read(&cl->remaining) == -1);
	}
}
EXPORT_SYMBOL(__closure_lock);

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *debug;

#define work_data_bits(work) ((unsigned long *) (&(work)->data))

static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;
	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "",
			   r & CLOSURE_STACK	? "S" : "",
			   r & CLOSURE_SLEEPING	? "Sl" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};

void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
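
/*
 * When CONFIG_BCACHE_CLOSURES_DEBUG is enabled, the live-closure list
 * above is readable at /sys/kernel/debug/closures (assuming debugfs is
 * mounted at the usual /sys/kernel/debug).
 */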

#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");