call_object.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till a dead call expires after its last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]  = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]   = "ClRcvRpl",
        [RXRPC_CALL_CLIENT_FINAL_ACK]    = "ClFnlACK",
        [RXRPC_CALL_SERVER_SECURING]     = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]    = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]  = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]   = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]    = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]            = "Complete",
        [RXRPC_CALL_SERVER_BUSY]         = "SvBusy  ",
        [RXRPC_CALL_REMOTELY_ABORTED]    = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]     = "LocAbort",
        [RXRPC_CALL_NETWORK_ERROR]       = "NetError",
        [RXRPC_CALL_DEAD]                = "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
        u8           in_clientflag,
        u32          cid,
        u32          call_id,
        u32          epoch,
        u16          service_id,
        sa_family_t  family,
        void         *localptr,
        unsigned int addr_size,
        const u8     *peer_addr)
{
        const u16 *p;
        unsigned int i;
        unsigned long key;

        _enter("");

        key = (unsigned long)localptr;
        /* We just want to add up the __be32 values, so forcing the
         * cast should be okay.
         */
        key += epoch;
        key += service_id;
        key += call_id;
        key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
        key += cid & RXRPC_CHANNELMASK;
        key += in_clientflag;
        key += family;
        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
                key += *p;

        _leave(" key = 0x%lx", key);
        return key;
}
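
/* Illustrative sketch (editorial addition, not part of the original file):
 * for an IPv4 peer, addr_size is 4, so the address contributes two 16-bit
 * words to the sum.  A lookup key for an incoming packet would be built
 * with the same inputs the add path uses, e.g.:
 *
 *      key = rxrpc_call_hashfunc(hdr->flags & RXRPC_CLIENT_INITIATED,
 *                                hdr->cid, hdr->callNumber, hdr->epoch,
 *                                hdr->serviceId, AF_INET, local,
 *                                sizeof(struct in_addr),
 *                                (const u8 *)&sin->sin_addr);
 *
 * where "local" and "sin" stand in for the caller's local endpoint and peer
 * sockaddr.  The add and find paths must feed identical inputs to this
 * function, or the hash buckets will not line up.
 */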

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
        unsigned long key;
        unsigned int addr_size = 0;

        _enter("");
        switch (call->family) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }
        key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
                                  call->call_id, call->epoch,
                                  call->service_id, call->family,
                                  call->conn->params.local, addr_size,
                                  call->peer_ip.ipv6_addr);
        /* Store the full key in the call */
        call->hash_key = key;
        spin_lock(&rxrpc_call_hash_lock);
        hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
        _enter("");
        spin_lock(&rxrpc_call_hash_lock);
        hash_del_rcu(&call->hash_node);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
        struct rxrpc_host_header *hdr,
        void *localptr,
        sa_family_t family,
        const void *peer_addr)
{
        unsigned long key;
        unsigned int addr_size = 0;
        struct rxrpc_call *call = NULL;
        struct rxrpc_call *ret = NULL;
        u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

        _enter("");
        switch (family) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }

        key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
                                  hdr->epoch, hdr->serviceId,
                                  family, localptr, addr_size,
                                  peer_addr);
        hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
                if (call->hash_key == key &&
                    call->call_id == hdr->callNumber &&
                    call->cid == hdr->cid &&
                    call->in_clientflag == in_clientflag &&
                    call->service_id == hdr->serviceId &&
                    call->family == family &&
                    call->local == localptr &&
                    memcmp(call->peer_ip.ipv6_addr, peer_addr,
                           addr_size) == 0 &&
                    call->epoch == hdr->epoch) {
                        ret = call;
                        break;
                }
        }
        _leave(" = %p", ret);
        return ret;
}
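
/* Usage sketch (editorial addition; the calling context shown is an
 * assumption): the packet-reception path can map an incoming header
 * straight to an extant call:
 *
 *      call = rxrpc_find_call_hash(&sp->hdr, local, AF_INET, peer_addr);
 *      if (!call)
 *              ;  // no extant call - possibly a new incoming call
 *
 * Note that the loop above re-checks every field that went into the key:
 * the key is a plain sum, so distinct tuples can collide in one bucket.
 */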

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}
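
/* Usage note (editorial addition, not part of the original file): on
 * success the call is returned with its usage count raised by
 * rxrpc_get_call(), so the caller must drop it when done:
 *
 *      call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *      if (call) {
 *              ...
 *              rxrpc_put_call(call);
 *      }
 */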

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->acks_winsz = 16;
        call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
                                    gfp);
        if (!call->acks_window) {
                kmem_cache_free(rxrpc_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
                    (unsigned long) call);
        setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
                    (unsigned long) call);
        setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
                    (unsigned long) call);
        setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
                    (unsigned long) call);
        INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->accept_link);
        skb_queue_head_init(&call->rx_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        init_waitqueue_head(&call->tx_waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
        call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
        call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
}
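
/* Note (editorial addition, not part of the original file): acks_winsz must
 * remain a power of two.  The Tx window indices are wrapped with a mask
 * rather than a modulo, as in rxrpc_cleanup_call() below:
 *
 *      call->acks_tail = (call->acks_tail + 1) & (call->acks_winsz - 1);
 *
 * which only behaves as a ring-buffer wrap when the window size is 2^n.
 */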

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
        struct rxrpc_sock *rx,
        struct rxrpc_conn_parameters *cp,
        struct rxrpc_transport *trans,
        struct rxrpc_conn_bundle *bundle,
        gfp_t gfp)
{
        struct rxrpc_call *call;
        int ret;

        _enter("");

        ASSERT(rx != NULL);
        ASSERT(trans != NULL);
        ASSERT(bundle != NULL);

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);

        sock_hold(&rx->sk);
        call->socket = rx;
        call->rx_data_post = 1;

        ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp);
        if (ret < 0) {
                /* undo the socket ref taken above and the ACK window
                 * allocated by rxrpc_alloc_call(), not just the call object
                 */
                sock_put(&rx->sk);
                kfree(call->acks_window);
                kmem_cache_free(rxrpc_call_jar, call);
                return ERR_PTR(ret);
        }

        /* Record copies of information for hashtable lookup */
        call->family = rx->family;
        call->local = call->conn->params.local;
        switch (call->family) {
        case AF_INET:
                call->peer_ip.ipv4_addr =
                        call->conn->params.peer->srx.transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       call->conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        }
        call->epoch = call->conn->proto.epoch;
        call->service_id = call->conn->params.service_id;
        call->in_clientflag = call->conn->proto.in_clientflag;

        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        spin_lock(&call->conn->params.peer->lock);
        hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
        spin_unlock(&call->conn->params.peer->lock);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);

        _leave(" = %p", call);
        return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct rxrpc_transport *trans,
                                         struct rxrpc_conn_bundle *bundle,
                                         unsigned long user_call_ID,
                                         gfp_t gfp)
{
        struct rxrpc_call *call, *xcall;
        struct rb_node *parent, **pp;

        _enter("%p,%d,%d,%lx",
               rx, trans->debug_id, bundle ? bundle->debug_id : -1,
               user_call_ID);

        call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp);
        if (IS_ERR(call)) {
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto found_user_ID_now_present;
        }

        rxrpc_get_call(call);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the tree after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
found_user_ID_now_present:
        write_unlock(&rx->call_lock);
        rxrpc_put_call(call);
        _leave(" = -EEXIST [%p]", call);
        return ERR_PTR(-EEXIST);
}
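
/* Usage sketch (editorial addition; the sendmsg() context is an
 * assumption): a client call is created under a user-chosen ID and can
 * later be resolved by that same ID:
 *
 *      call = rxrpc_new_client_call(rx, &cp, trans, bundle,
 *                                   user_call_ID, GFP_KERNEL);
 *      if (IS_ERR(call))
 *              return PTR_ERR(call);
 *      ...
 *      call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 */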

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
                                       struct rxrpc_host_header *hdr)
{
        struct rxrpc_call *call, *candidate;
        struct rb_node **p, *parent;
        u32 call_id;

        _enter(",%d", conn->debug_id);

        ASSERT(rx != NULL);

        candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);

        candidate->socket = rx;
        candidate->conn = conn;
        candidate->cid = hdr->cid;
        candidate->call_id = hdr->callNumber;
        candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
        candidate->rx_data_post = 0;
        candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
        if (conn->security_ix > 0)
                candidate->state = RXRPC_CALL_SERVER_SECURING;

        write_lock_bh(&conn->lock);

        /* set the channel for this call */
        call = conn->channels[candidate->channel];
        _debug("channel[%u] is %p", candidate->channel, call);
        if (call && call->call_id == hdr->callNumber) {
                /* already set; must've been a duplicate packet */
                _debug("extant call [%d]", call->state);
                ASSERTCMP(call->conn, ==, conn);

                read_lock(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                rxrpc_queue_call(call);
                        /* fall through */
                case RXRPC_CALL_REMOTELY_ABORTED:
                        read_unlock(&call->state_lock);
                        goto aborted_call;
                default:
                        rxrpc_get_call(call);
                        read_unlock(&call->state_lock);
                        goto extant_call;
                }
        }

        if (call) {
                /* it seems the channel is still in use from the previous call
                 * - ditch the old binding if its call is now complete */
                _debug("CALL: %u { %s }",
                       call->debug_id, rxrpc_call_states[call->state]);

                if (call->state >= RXRPC_CALL_COMPLETE) {
                        conn->channels[call->channel] = NULL;
                } else {
                        write_unlock_bh(&conn->lock);
                        kfree(candidate->acks_window);
                        kmem_cache_free(rxrpc_call_jar, candidate);
                        _leave(" = -EBUSY");
                        return ERR_PTR(-EBUSY);
                }
        }

        /* check the call number isn't duplicate */
        _debug("check dup");
        call_id = hdr->callNumber;
        p = &conn->calls.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                call = rb_entry(parent, struct rxrpc_call, conn_node);

                /* The tree is sorted in order of the __be32 value without
                 * turning it into host order.
                 */
                if (call_id < call->call_id)
                        p = &(*p)->rb_left;
                else if (call_id > call->call_id)
                        p = &(*p)->rb_right;
                else
                        goto old_call;
        }

        /* make the call available */
        _debug("new call");
        call = candidate;
        candidate = NULL;
        rb_link_node(&call->conn_node, parent, p);
        rb_insert_color(&call->conn_node, &conn->calls);
        conn->channels[call->channel] = call;
        sock_hold(&rx->sk);
        atomic_inc(&conn->usage);
        write_unlock_bh(&conn->lock);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        /* Record copies of information for hashtable lookup */
        call->family = rx->family;
        call->local = conn->params.local;
        switch (call->family) {
        case AF_INET:
                call->peer_ip.ipv4_addr =
                        conn->params.peer->srx.transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        default:
                break;
        }
        call->epoch = conn->proto.epoch;
        call->service_id = conn->params.service_id;
        call->in_clientflag = conn->proto.in_clientflag;

        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;

extant_call:
        write_unlock_bh(&conn->lock);
        kfree(candidate->acks_window);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
        return call;

aborted_call:
        write_unlock_bh(&conn->lock);
        kfree(candidate->acks_window);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNABORTED");
        return ERR_PTR(-ECONNABORTED);

old_call:
        write_unlock_bh(&conn->lock);
        kfree(candidate->acks_window);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNRESET [old]");
        return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_sock *rx = call->socket;

        _enter("{%d,%d,%d,%d}",
               call->debug_id, atomic_read(&call->usage),
               atomic_read(&call->ackr_not_idle),
               call->rx_first_oos);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        /* dissociate from the socket
         * - the socket's ref on the call is passed to the death timer
         */
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        write_lock_bh(&rx->call_lock);
        if (!list_empty(&call->accept_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
                list_del_init(&call->accept_link);
                sk_acceptq_removed(&rx->sk);
        } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }
        write_unlock_bh(&rx->call_lock);

        /* free up the channel for reuse */
        spin_lock(&conn->trans->client_lock);
        write_lock_bh(&conn->lock);
        write_lock(&call->state_lock);

        if (conn->channels[call->channel] == call)
                conn->channels[call->channel] = NULL;

        if (conn->out_clientflag && conn->bundle) {
                conn->avail_calls++;
                switch (conn->avail_calls) {
                case 1:
                        list_move_tail(&conn->bundle_link,
                                       &conn->bundle->avail_conns);
                        /* fall through */
                case 2 ... RXRPC_MAXCALLS - 1:
                        ASSERT(conn->channels[0] == NULL ||
                               conn->channels[1] == NULL ||
                               conn->channels[2] == NULL ||
                               conn->channels[3] == NULL);
                        break;
                case RXRPC_MAXCALLS:
                        list_move_tail(&conn->bundle_link,
                                       &conn->bundle->unused_conns);
                        ASSERT(conn->channels[0] == NULL &&
                               conn->channels[1] == NULL &&
                               conn->channels[2] == NULL &&
                               conn->channels[3] == NULL);
                        break;
                default:
                        pr_err("conn->avail_calls=%d\n", conn->avail_calls);
                        BUG();
                }
        }
        spin_unlock(&conn->trans->client_lock);

        if (call->state < RXRPC_CALL_COMPLETE &&
            call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
                _debug("+++ ABORTING STATE %d +++\n", call->state);
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                call->local_abort = RX_CALL_DEAD;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
        write_unlock_bh(&conn->lock);

        /* clean up the Rx queue */
        if (!skb_queue_empty(&call->rx_queue) ||
            !skb_queue_empty(&call->rx_oos_queue)) {
                struct rxrpc_skb_priv *sp;
                struct sk_buff *skb;

                _debug("purge Rx queues");

                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
                        sp = rxrpc_skb(skb);
                        if (sp->call) {
                                ASSERTCMP(sp->call, ==, call);
                                rxrpc_put_call(call);
                                sp->call = NULL;
                        }
                        skb->destructor = NULL;
                        spin_unlock_bh(&call->lock);

                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
                        rxrpc_free_skb(skb);
                        spin_lock_bh(&call->lock);
                }
                spin_unlock_bh(&call->lock);

                ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
        }

        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);
        call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
        add_timer(&call->deadspan);

        _leave("");
}
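
/* Editorial summary (not part of the original file): after
 * rxrpc_release_call() the remainder of a call's life is driven by the
 * timers and work items defined in this file:
 *
 *      rxrpc_release_call()       arms call->deadspan
 *      rxrpc_dead_call_expired()  sets RXRPC_CALL_DEAD and drops the
 *                                 socket's ref via rxrpc_put_call()
 *      __rxrpc_put_call()         queues call->destroyer on the last ref
 *      rxrpc_destroy_call()       unlinks the call from rxrpc_calls and
 *                                 calls rxrpc_cleanup_call() to free it
 */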

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        write_lock_bh(&call->state_lock);
        call->state = RXRPC_CALL_DEAD;
        write_unlock_bh(&call->state_lock);
        rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
        bool sched;

        write_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_DEAD) {
                sched = false;
                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("abort call %p", call);
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->local_abort = RX_CALL_DEAD;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                sched = true;
                }
                if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
                        sched = true;
                if (sched)
                        rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p", rx);

        read_lock_bh(&rx->call_lock);

        /* mark all the calls as no longer wanting incoming packets */
        for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
                call = rb_entry(p, struct rxrpc_call, sock_node);
                rxrpc_mark_call_released(call);
        }

        /* kill the not-yet-accepted incoming calls */
        list_for_each_entry(call, &rx->secureq, accept_link) {
                rxrpc_mark_call_released(call);
        }
        list_for_each_entry(call, &rx->acceptq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        read_unlock_bh(&rx->call_lock);
        _leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
        ASSERT(call != NULL);

        _enter("%p{u=%d}", call, atomic_read(&call->usage));

        ASSERTCMP(atomic_read(&call->usage), >, 0);

        if (atomic_dec_and_test(&call->usage)) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                rxrpc_queue_work(&call->destroyer);
        }
        _leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        ASSERT(call->socket);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->lifetimer);
        del_timer_sync(&call->deadspan);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->resend_timer);

        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->events, ==, 0);
        if (work_pending(&call->processor)) {
                _debug("defer destroy");
                rxrpc_queue_work(&call->destroyer);
                return;
        }

        if (call->conn) {
                spin_lock(&call->conn->params.peer->lock);
                hlist_del_init(&call->error_link);
                spin_unlock(&call->conn->params.peer->lock);

                write_lock_bh(&call->conn->lock);
                rb_erase(&call->conn_node, &call->conn->calls);
                write_unlock_bh(&call->conn->lock);
                rxrpc_put_connection(call->conn);
        }

        /* Remove the call from the hash */
        rxrpc_call_hash_del(call);

        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz));
                smp_mb();
                while (CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz) > 0) {
                        struct rxrpc_skb_priv *sp;
                        unsigned long _skb;

                        _skb = call->acks_window[call->acks_tail] & ~1;
                        sp = rxrpc_skb((struct sk_buff *)_skb);
                        _debug("+++ clear Tx %u", sp->hdr.seq);
                        rxrpc_free_skb((struct sk_buff *)_skb);
                        call->acks_tail =
                                (call->acks_tail + 1) & (call->acks_winsz - 1);
                }

                kfree(call->acks_window);
        }

        rxrpc_free_skb(call->tx_pending);

        rxrpc_purge_queue(&call->rx_queue);
        ASSERT(skb_queue_empty(&call->rx_oos_queue));
        sock_put(&call->socket->sk);
        kmem_cache_free(rxrpc_call_jar, call);
}
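
/* Editorial note (an inference from the masking above, not stated in the
 * original file): entries in call->acks_window appear to be sk_buff
 * pointers with the bottom bit reserved as a flag by the transmit path,
 * which is why the reaping loop strips it with "& ~1" before treating the
 * value as a pointer.
 */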

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, destroyer);

        _enter("%p{%d,%d,%p}",
               call, atomic_read(&call->usage), call->channel, call->conn);

        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        rxrpc_cleanup_call(call);
        _leave("");
}

/*
 * preemptively destroy all the call records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
        struct rxrpc_call *call;

        _enter("");
        write_lock_bh(&rxrpc_call_lock);

        while (!list_empty(&rxrpc_calls)) {
                call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                list_del_init(&call->link);

                switch (atomic_read(&call->usage)) {
                case 0:
                        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                        break;
                case 1:
                        if (del_timer_sync(&call->deadspan) != 0 &&
                            call->state != RXRPC_CALL_DEAD)
                                rxrpc_dead_call_expired((unsigned long) call);
                        if (call->state != RXRPC_CALL_DEAD)
                                break;
                        /* fall through */
                default:
                        pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               atomic_read(&call->ackr_not_idle),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);
                        if (!skb_queue_empty(&call->rx_queue))
                                pr_err("Rx queue occupied\n");
                        if (!skb_queue_empty(&call->rx_oos_queue))
                                pr_err("OOS queue occupied\n");
                        break;
                }

                write_unlock_bh(&rxrpc_call_lock);
                cond_resched();
                write_lock_bh(&rxrpc_call_lock);
        }

        write_unlock_bh(&rxrpc_call_lock);
        _leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        _enter("{%d}", call->debug_id);
        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE) {
                set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
                rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against
 *   del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
                rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}