call_object.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
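/*
 * Editor's note: a successful lookup above returns the call with its usage
 * count already raised by rxrpc_get_call(), so a typical caller pairs it with
 * a put, along these illustrative lines (sketch, not part of this file):
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (call) {
 *		... operate on the call ...
 *		rxrpc_put_call(call);
 *	}
 */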
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
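/*
 * Editor's note: the ACK window allocated above is drained in
 * rxrpc_cleanup_call() with the ring index masked by (acks_winsz - 1), so
 * the window size of 16 chosen here must remain a power of two if it is
 * ever changed.
 */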
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}
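/*
 * Editor's note: an illustrative sketch (not part of this file) of how a
 * caller such as the sendmsg path might use the function above, assuming a
 * struct rxrpc_conn_parameters cp and a struct sockaddr_rxrpc srx have
 * already been filled in:
 *
 *	call = rxrpc_new_client_call(rx, &cp, &srx, user_call_ID, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	... transmit the request and await the reply ...
 *	rxrpc_put_call(call);
 */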
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* Fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
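/*
 * Editor's note on the return contract of rxrpc_incoming_call() above: it
 * yields the new or extant call on success, ERR_PTR(-EBUSY) if no call record
 * could be allocated or the channel is still occupied by an incomplete call,
 * ERR_PTR(-ECONNABORTED) if the call was aborted, and ERR_PTR(-ECONNRESET)
 * if the call ID predates the channel's current call counter.
 */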
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
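/*
 * Editor's note: a released call is not freed straight away; it is parked on
 * the deadspan timer for rxrpc_dead_call_expiry jiffies, and when that timer
 * fires, rxrpc_dead_call_expired() below marks the call dead and drops the
 * reference that the socket handed over to the timer.
 */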
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(atomic_read(&call->skb_count) != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* Each window slot holds an skb pointer with the
			 * bottom bit used as a flag, so mask that off to
			 * recover the pointer.
			 */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
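/*
 * Editor's note: the final kmem_cache_free() is deferred via call_rcu() above
 * so that anything still inspecting conn->channels[chan].call under the RCU
 * read lock cannot see the call's memory reused before a grace period has
 * elapsed.
 */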
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* Fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}