  1. /*
  2. * (C) 2001 Clemson University and The University of Chicago
  3. * (C) 2011 Omnibond Systems
  4. *
  5. * Changes by Acxiom Corporation to implement generic service_operation()
  6. * function, Copyright Acxiom Corporation, 2005.
  7. *
  8. * See COPYING in top-level directory.
  9. */
  10. /*
  11. * In-kernel waitqueue operations.
  12. */
  13. #include "protocol.h"
  14. #include "orangefs-kernel.h"
  15. #include "orangefs-bufmap.h"
  16. static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
  17. static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);
  18. /*
  19. * What we do in this function is to walk the list of operations that are
  20. * present in the request queue and mark them as purged.
  21. * NOTE: This is called from the device close after client-core has
  22. * guaranteed that no new operations could appear on the list since the
  23. * client-core is anyway going to exit.
  24. */
  25. void purge_waiting_ops(void)
  26. {
  27. struct orangefs_kernel_op_s *op;
  28. spin_lock(&orangefs_request_list_lock);
  29. list_for_each_entry(op, &orangefs_request_list, list) {
  30. gossip_debug(GOSSIP_WAIT_DEBUG,
  31. "pvfs2-client-core: purging op tag %llu %s\n",
  32. llu(op->tag),
  33. get_opname_string(op));
  34. set_op_state_purged(op);
  35. }
  36. spin_unlock(&orangefs_request_list_lock);
  37. }
  38. /*
  39. * submits a ORANGEFS operation and waits for it to complete
  40. *
  41. * Note op->downcall.status will contain the status of the operation (in
  42. * errno format), whether provided by pvfs2-client or a result of failure to
  43. * service the operation. If the caller wishes to distinguish, then
  44. * op->state can be checked to see if it was serviced or not.
  45. *
  46. * Returns contents of op->downcall.status for convenience
  47. */
  48. int service_operation(struct orangefs_kernel_op_s *op,
  49. const char *op_name,
  50. int flags)
  51. {
  52. long timeout = MAX_SCHEDULE_TIMEOUT;
  53. int ret = 0;
  54. DEFINE_WAIT(wait_entry);
  55. op->upcall.tgid = current->tgid;
  56. op->upcall.pid = current->pid;
  57. retry_servicing:
  58. op->downcall.status = 0;
  59. gossip_debug(GOSSIP_WAIT_DEBUG,
  60. "%s: %s op:%p: process:%s: pid:%d:\n",
  61. __func__,
  62. op_name,
  63. op,
  64. current->comm,
  65. current->pid);
  66. /*
  67. * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
  68. * acquiring the request_mutex because we're servicing a
  69. * high priority remount operation and the request_mutex is
  70. * already taken.
  71. */
  72. if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
  73. if (flags & ORANGEFS_OP_INTERRUPTIBLE)
  74. ret = mutex_lock_interruptible(&request_mutex);
  75. else
  76. ret = mutex_lock_killable(&request_mutex);
  77. /*
  78. * check to see if we were interrupted while waiting for
  79. * mutex
  80. */
  81. if (ret < 0) {
  82. op->downcall.status = ret;
  83. gossip_debug(GOSSIP_WAIT_DEBUG,
  84. "%s: service_operation interrupted.\n",
  85. __func__);
  86. return ret;
  87. }
  88. }
  89. /* queue up the operation */
  90. spin_lock(&orangefs_request_list_lock);
  91. spin_lock(&op->lock);
  92. set_op_state_waiting(op);
  93. /* add high priority remount op to the front of the line. */
  94. if (flags & ORANGEFS_OP_PRIORITY)
  95. list_add(&op->list, &orangefs_request_list);
  96. else
  97. list_add_tail(&op->list, &orangefs_request_list);
  98. spin_unlock(&op->lock);
  99. wake_up_interruptible(&orangefs_request_list_waitq);
  100. if (!__is_daemon_in_service()) {
  101. gossip_debug(GOSSIP_WAIT_DEBUG,
  102. "%s:client core is NOT in service.\n",
  103. __func__);
  104. timeout = op_timeout_secs * HZ;
  105. }
  106. spin_unlock(&orangefs_request_list_lock);
  107. if (!(flags & ORANGEFS_OP_NO_MUTEX))
  108. mutex_unlock(&request_mutex);
  109. ret = wait_for_matching_downcall(op, timeout,
  110. flags & ORANGEFS_OP_INTERRUPTIBLE);
  111. gossip_debug(GOSSIP_WAIT_DEBUG,
  112. "%s: wait_for_matching_downcall returned %d for %p\n",
  113. __func__,
  114. ret,
  115. op);
  116. /* got matching downcall; make sure status is in errno format */
  117. if (!ret) {
  118. spin_unlock(&op->lock);
  119. op->downcall.status =
  120. orangefs_normalize_to_errno(op->downcall.status);
  121. ret = op->downcall.status;
  122. goto out;
  123. }
  124. /* failed to get matching downcall */
  125. if (ret == -ETIMEDOUT) {
  126. gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
  127. __func__,
  128. op_name);
  129. }
  130. /*
  131. * remove a waiting op from the request list or
  132. * remove an in-progress op from the in-progress list.
  133. */
  134. orangefs_clean_up_interrupted_operation(op);
  135. op->downcall.status = ret;
  136. /* retry if operation has not been serviced and if requested */
  137. if (ret == -EAGAIN) {
  138. op->attempts++;
  139. timeout = op_timeout_secs * HZ;
  140. gossip_debug(GOSSIP_WAIT_DEBUG,
  141. "orangefs: tag %llu (%s)"
  142. " -- operation to be retried (%d attempt)\n",
  143. llu(op->tag),
  144. op_name,
  145. op->attempts);
  146. /*
  147. * io ops (ops that use the shared memory buffer) have
  148. * to be returned to their caller for a retry. Other ops
  149. * can just be recycled here.
  150. */
  151. if (!op->uses_shared_memory)
  152. goto retry_servicing;
  153. }
  154. out:
  155. gossip_debug(GOSSIP_WAIT_DEBUG,
  156. "orangefs: service_operation %s returning: %d for %p.\n",
  157. op_name,
  158. ret,
  159. op);
  160. return ret;
  161. }
/*
 * This can get called on an I/O op if it had a bad service_operation.
 *
 * Rewrites @op in place into a cancellation upcall for its original
 * tag and requeues it at the head of the request list.  Returns true
 * if the cancel was queued for the client-core, false if the op was
 * not in progress or no client-core is running to receive it.
 */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;
	if (!op_state_in_progress(op))
		return false;

	/*
	 * remember the shared-memory slot before the upcall is wiped
	 * below, so it can still be released later.
	 */
	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	/* the cancel targets the tag the I/O op was issued under */
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		/* nobody to deliver the cancel to */
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	/* cancels jump to the front of the queue */
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}
/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.
	 *
	 * Called with op->lock held; returns with op->lock released in
	 * every branch.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/*
		 * caught copying to/from daemon: the op is on neither
		 * list, so block until the device code completes it
		 * before letting the caller recycle it.
		 */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
	/* re-arm the completion so the op can be safely reused/retried */
	reinit_completion(&op->waitq);
}
  243. /*
  244. * Sleeps on waitqueue waiting for matching downcall.
  245. * If client-core finishes servicing, then we are good to go.
  246. * else if client-core exits, we get woken up here, and retry with a timeout
  247. *
  248. * When this call returns to the caller, the specified op will no
  249. * longer be in either the in_progress hash table or on the request list.
  250. *
  251. * Returns 0 on success and -errno on failure
  252. * Errors are:
  253. * EAGAIN in case we want the caller to requeue and try again..
  254. * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
  255. * operation since client-core seems to be exiting too often
  256. * or if we were interrupted.
  257. *
  258. * Returns with op->lock taken.
  259. */
  260. static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
  261. long timeout,
  262. bool interruptible)
  263. {
  264. long n;
  265. /*
  266. * There's a "schedule_timeout" inside of these wait
  267. * primitives, during which the op is out of the hands of the
  268. * user process that needs something done and is being
  269. * manipulated by the client-core process.
  270. */
  271. if (interruptible)
  272. n = wait_for_completion_interruptible_timeout(&op->waitq,
  273. timeout);
  274. else
  275. n = wait_for_completion_killable_timeout(&op->waitq, timeout);
  276. spin_lock(&op->lock);
  277. if (op_state_serviced(op))
  278. return 0;
  279. if (unlikely(n < 0)) {
  280. gossip_debug(GOSSIP_WAIT_DEBUG,
  281. "*** %s:"
  282. " operation interrupted by a signal (tag "
  283. "%llu, op %p)\n",
  284. __func__,
  285. llu(op->tag),
  286. op);
  287. return -EINTR;
  288. }
  289. if (op_state_purged(op)) {
  290. gossip_debug(GOSSIP_WAIT_DEBUG,
  291. "*** %s:"
  292. " operation purged (tag "
  293. "%llu, %p, att %d)\n",
  294. __func__,
  295. llu(op->tag),
  296. op,
  297. op->attempts);
  298. return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
  299. -EAGAIN :
  300. -EIO;
  301. }
  302. /* must have timed out, then... */
  303. gossip_debug(GOSSIP_WAIT_DEBUG,
  304. "*** %s:"
  305. " operation timed out (tag"
  306. " %llu, %p, att %d)\n",
  307. __func__,
  308. llu(op->tag),
  309. op,
  310. op->attempts);
  311. return -ETIMEDOUT;
  312. }