call_object.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till a dead call expires after its last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
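
/* The state names above are used in the debugging and error output below;
 * they appear to be padded to a fixed eight-character width so that tabular
 * output lines up.
 */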

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;
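
	/* The Tx ACK window is a ring of skb pointers; rxrpc_cleanup_call()
	 * advances the tail with "& (acks_winsz - 1)", so the size must stay
	 * a power of two.
	 */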
	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
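
	/* Poison the rbtree node so that any use of sock_node before the call
	 * is inserted into a socket's call tree stands out.
	 */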
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
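
	/* The advertised receive window runs from the packet after the last
	 * one consumed up to rx_data_eaten + 1 + rxrpc_rx_window_size.
	 */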
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	call->local = rx->local;
	call->service_id = srx->srx_service;
	call->in_clientflag = 0;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);
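
	/* Mark the call released and dead, then drop the remaining ref; the
	 * final put queues the destroyer work item, which does the rest.
	 */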
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->channel	= chan;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
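			/* fall through */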
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->local = conn->params.local;
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = RXRPC_CLIENT_INITIATED;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
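
	/* Drop the socket's ref, which rxrpc_release_call() handed over to
	 * the deadspan timer.
	 */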
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(atomic_read(&call->skb_count) != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
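		/* Defer the actual clean-up to a work item so that it always
		 * runs in process context, wherever the last ref was dropped.
		 */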
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;
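
			/* Bit 0 of each window slot is used as a flag, so
			 * mask it off to recover the sk_buff pointer.
			 */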
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
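			/* fall through */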
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}
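
		/* The lock can't be held across cond_resched(), so release it
		 * between passes over the list.
		 */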
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}