/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]       = "Uninit",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]   = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]  = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]   = "ClRcvRpl",
        [RXRPC_CALL_CLIENT_FINAL_ACK]    = "ClFnlACK",
        [RXRPC_CALL_SERVER_SECURING]     = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]    = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]  = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]   = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]    = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]            = "Complete",
        [RXRPC_CALL_SERVER_BUSY]         = "SvBusy  ",
        [RXRPC_CALL_REMOTELY_ABORTED]    = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]     = "LocAbort",
        [RXRPC_CALL_NETWORK_ERROR]       = "NetError",
        [RXRPC_CALL_DEAD]                = "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call by user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->acks_winsz = 16;
        call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
                                    gfp);
        if (!call->acks_window) {
                kmem_cache_free(rxrpc_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
                    (unsigned long) call);
        setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
                    (unsigned long) call);
        setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
                    (unsigned long) call);
        setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
                    (unsigned long) call);
        INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->accept_link);
        skb_queue_head_init(&call->rx_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        init_waitqueue_head(&call->tx_waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
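
        /* Poison the sock_node so that use of the call before it has been
         * inserted into a socket's call tree shows up clearly in a crash
         * dump.
         */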
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
        call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
{
        struct rxrpc_call *call;

        _enter("");

        ASSERT(rx->local != NULL);

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

        sock_hold(&rx->sk);
        call->socket = rx;
        call->rx_data_post = 1;
        call->service_id = srx->srx_service;

        _leave(" = %p", call);
        return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
                                   struct rxrpc_conn_parameters *cp,
                                   struct sockaddr_rxrpc *srx,
                                   gfp_t gfp)
{
        int ret;

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                return ret;

        call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
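
        /* Add the call to the peer's error distribution list so that errors
         * raised against the peer (e.g. by ICMP) get propagated to it.
         */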
        spin_lock(&call->conn->params.peer->lock);
        hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
        spin_unlock(&call->conn->params.peer->lock);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         unsigned long user_call_ID,
                                         gfp_t gfp)
{
        struct rxrpc_call *call, *xcall;
        struct rb_node *parent, **pp;
        int ret;

        _enter("%p,%lx", rx, user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp);
        if (IS_ERR(call)) {
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        /* Publish the call, even though it is incompletely set up as yet */
        call->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto found_user_ID_now_present;
        }
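
        /* The socket's call tree takes its own ref on the call. */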
        rxrpc_get_call(call);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        ret = rxrpc_begin_client_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

error:
        write_lock(&rx->call_lock);
        rb_erase(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);
        rxrpc_put_call(call);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        set_bit(RXRPC_CALL_RELEASED, &call->flags);
        call->state = RXRPC_CALL_DEAD;
        rxrpc_put_call(call);
        _leave(" = %d", ret);
        return ERR_PTR(ret);

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
found_user_ID_now_present:
        write_unlock(&rx->call_lock);
        set_bit(RXRPC_CALL_RELEASED, &call->flags);
        call->state = RXRPC_CALL_DEAD;
        rxrpc_put_call(call);
        _leave(" = -EEXIST [%p]", call);
        return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
                                       struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_call *call, *candidate;
        u32 call_id, chan;

        _enter(",%d", conn->debug_id);

        ASSERT(rx != NULL);
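
        /* GFP_NOIO: memory reclaim performed by this allocation must not
         * start any I/O.
         */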
        candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);

        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        candidate->socket = rx;
        candidate->conn = conn;
        candidate->cid = sp->hdr.cid;
        candidate->call_id = sp->hdr.callNumber;
        candidate->rx_data_post = 0;
        candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
        candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        if (conn->security_ix > 0)
                candidate->state = RXRPC_CALL_SERVER_SECURING;

        spin_lock(&conn->channel_lock);

        /* set the channel for this call */
        call = rcu_dereference_protected(conn->channels[chan].call,
                                         lockdep_is_held(&conn->channel_lock));

        _debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
        if (call && call->call_id == sp->hdr.callNumber) {
                /* already set; must've been a duplicate packet */
                _debug("extant call [%d]", call->state);
                ASSERTCMP(call->conn, ==, conn);

                read_lock(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                rxrpc_queue_call(call);
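                        /* Fall through */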
                case RXRPC_CALL_REMOTELY_ABORTED:
                        read_unlock(&call->state_lock);
                        goto aborted_call;
                default:
                        rxrpc_get_call(call);
                        read_unlock(&call->state_lock);
                        goto extant_call;
                }
        }

        if (call) {
                /* it seems the channel is still in use from the previous call
                 * - ditch the old binding if its call is now complete */
                _debug("CALL: %u { %s }",
                       call->debug_id, rxrpc_call_states[call->state]);

                if (call->state >= RXRPC_CALL_COMPLETE) {
                        __rxrpc_disconnect_call(call);
                } else {
                        spin_unlock(&conn->channel_lock);
                        kmem_cache_free(rxrpc_call_jar, candidate);
                        _leave(" = -EBUSY");
                        return ERR_PTR(-EBUSY);
                }
        }

        /* check that the call number isn't a duplicate */
        _debug("check dup");
        call_id = sp->hdr.callNumber;

        /* We just ignore calls prior to the current call ID.  Terminated calls
         * are handled via the connection.
         */
        if (call_id <= conn->channels[chan].call_counter)
                goto old_call; /* TODO: Just drop packet */

        /* make the call available */
        _debug("new call");
        call = candidate;
        candidate = NULL;
        conn->channels[chan].call_counter = call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);
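
        /* The call holds refs on both the socket and the connection. */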
        sock_hold(&rx->sk);
        rxrpc_get_connection(conn);
        spin_unlock(&conn->channel_lock);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        call->service_id = conn->params.service_id;

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;

extant_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
        return call;

aborted_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNABORTED");
        return ERR_PTR(-ECONNABORTED);

old_call:
        spin_unlock(&conn->channel_lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNRESET [old]");
        return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_sock *rx = call->socket;

        _enter("{%d,%d,%d,%d}",
               call->debug_id, atomic_read(&call->usage),
               atomic_read(&call->ackr_not_idle),
               call->rx_first_oos);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        /* dissociate from the socket
         * - the socket's ref on the call is passed to the death timer
         */
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        spin_lock(&conn->params.peer->lock);
        hlist_del_init(&call->error_link);
        spin_unlock(&conn->params.peer->lock);

        write_lock_bh(&rx->call_lock);
        if (!list_empty(&call->accept_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
                list_del_init(&call->accept_link);
                sk_acceptq_removed(&rx->sk);
        } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }
        write_unlock_bh(&rx->call_lock);

        /* free up the channel for reuse */
        write_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
                _debug("+++ ABORTING STATE %d +++\n", call->state);
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                call->local_abort = RX_CALL_DEAD;
        }
        write_unlock_bh(&call->state_lock);

        rxrpc_disconnect_call(call);

        /* clean up the Rx queue */
        if (!skb_queue_empty(&call->rx_queue) ||
            !skb_queue_empty(&call->rx_oos_queue)) {
                struct rxrpc_skb_priv *sp;
                struct sk_buff *skb;

                _debug("purge Rx queues");

                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
                        spin_unlock_bh(&call->lock);

                        sp = rxrpc_skb(skb);
                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
                        rxrpc_free_skb(skb);
                        spin_lock_bh(&call->lock);
                }
                spin_unlock_bh(&call->lock);

                ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
        }

        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);
        call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
        add_timer(&call->deadspan);

        _leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);
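
        /* The socket's ref on the call was passed to this timer by
         * rxrpc_release_call(); dropping it here may begin destruction of
         * the call.
         */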
        write_lock_bh(&call->state_lock);
        call->state = RXRPC_CALL_DEAD;
        write_unlock_bh(&call->state_lock);
        rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
        bool sched;

        write_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_DEAD) {
                sched = false;
                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("abort call %p", call);
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->local_abort = RX_CALL_DEAD;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                sched = true;
                }
                if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
                        sched = true;
                if (sched)
                        rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p", rx);

        read_lock_bh(&rx->call_lock);

        /* kill the not-yet-accepted incoming calls */
        list_for_each_entry(call, &rx->secureq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        list_for_each_entry(call, &rx->acceptq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        /* mark all the calls as no longer wanting incoming packets */
        for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
                call = rb_entry(p, struct rxrpc_call, sock_node);
                rxrpc_mark_call_released(call);
        }

        read_unlock_bh(&rx->call_lock);
        _leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
        ASSERT(call != NULL);

        _enter("%p{u=%d}", call, atomic_read(&call->usage));

        ASSERTCMP(atomic_read(&call->usage), >, 0);
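
        /* Destruction is deferred to a work item: the cleanup path deletes
         * timers synchronously and so needs process context, whereas the
         * final put may happen in softirq context.
         */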
        if (atomic_dec_and_test(&call->usage)) {
                _debug("call %d dead", call->debug_id);
                WARN_ON(atomic_read(&call->skb_count) != 0);
                ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                rxrpc_queue_work(&call->destroyer);
        }
        _leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

        rxrpc_purge_queue(&call->rx_queue);
        kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        ASSERT(call->socket);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->lifetimer);
        del_timer_sync(&call->deadspan);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->resend_timer);

        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->events, ==, 0);
        if (work_pending(&call->processor)) {
                _debug("defer destroy");
                rxrpc_queue_work(&call->destroyer);
                return;
        }

        ASSERTCMP(call->conn, ==, NULL);

        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz));
                smp_mb();
                while (CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz) > 0) {
                        struct rxrpc_skb_priv *sp;
                        unsigned long _skb;
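
                        /* The bottom bit of each Tx window slot is used as a
                         * flag, so mask it off to recover the sk_buff
                         * pointer.
                         */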
                        _skb = call->acks_window[call->acks_tail] & ~1;
                        sp = rxrpc_skb((struct sk_buff *) _skb);
                        _debug("+++ clear Tx %u", sp->hdr.seq);
                        rxrpc_free_skb((struct sk_buff *) _skb);
                        call->acks_tail =
                                (call->acks_tail + 1) & (call->acks_winsz - 1);
                }

                kfree(call->acks_window);
        }

        rxrpc_free_skb(call->tx_pending);

        rxrpc_purge_queue(&call->rx_queue);
        ASSERT(skb_queue_empty(&call->rx_oos_queue));
        sock_put(&call->socket->sk);
        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, destroyer);

        _enter("%p{%d,%x,%p}",
               call, atomic_read(&call->usage), call->cid, call->conn);

        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        rxrpc_cleanup_call(call);
        _leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
        struct rxrpc_call *call;

        _enter("");
        write_lock_bh(&rxrpc_call_lock);

        while (!list_empty(&rxrpc_calls)) {
                call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                list_del_init(&call->link);

                switch (atomic_read(&call->usage)) {
                case 0:
                        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                        break;
                case 1:
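                        /* Only the deadspan timer's ref should remain; if the
                         * timer hasn't fired yet, expire it by hand so that
                         * the call can be reaped.
                         */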
                        if (del_timer_sync(&call->deadspan) != 0 &&
                            call->state != RXRPC_CALL_DEAD)
                                rxrpc_dead_call_expired((unsigned long) call);
                        if (call->state != RXRPC_CALL_DEAD)
                                break;
                default:
                        pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               atomic_read(&call->ackr_not_idle),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);
                        if (!skb_queue_empty(&call->rx_queue))
                                pr_err("Rx queue occupied\n");
                        if (!skb_queue_empty(&call->rx_oos_queue))
                                pr_err("OOS queue occupied\n");
                        break;
                }

                write_unlock_bh(&rxrpc_call_lock);
                cond_resched();
                write_lock_bh(&rxrpc_call_lock);
        }

        write_unlock_bh(&rxrpc_call_lock);
        _leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        _enter("{%d}", call->debug_id);

        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE) {
                set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
                rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
                rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}