call_object.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};
struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
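
/* Illustrative sketch, not part of the original file: a sendmsg-style caller
 * might resolve a user call ID like this, dropping the ref taken above when
 * it is done with the call:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (!call)
 *		return -EBADSLT;
 *	...
 *	rxrpc_put_call(call, rxrpc_call_put);
 */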

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}
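
/* A sketch, not from this file: in the upstream tree the Rx/Tx ring is a
 * power-of-two array, so the code that fills and drains it indexes by masking
 * the packet sequence number, along the lines of:
 *
 *	ix = seq & (RXRPC_RXTX_BUFF_SIZE - 1);
 *	skb = call->rxtx_buffer[ix];
 *
 * which is why the window sizes set above are kept smaller than the buffer.
 */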

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}
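
/* Note, illustrative and inferred from the function above: every deadline
 * starts out at "never" (now + MAX_JIFFY_OFFSET) and is pulled earlier as
 * events demand, with something like:
 *
 *	unsigned long ack_at = jiffies + expiry;
 *
 *	if (time_before(ack_at, call->ack_at)) {
 *		call->ack_at = ack_at;
 *		rxrpc_reduce_call_timer(call, ack_at, jiffies,
 *					rxrpc_timer_set_for_ack);
 *	}
 */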

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	ret = -EEXIST;

error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
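
/* Illustrative caller pattern, a sketch rather than code from this file: the
 * socket lock must be held on entry, and on success the call comes back with
 * user_mutex held, which the caller must release:
 *
 *	lock_sock(&rx->sk);
 *	call = rxrpc_new_client_call(rx, &cp, &srx, &p, GFP_KERNEL, debug_id);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);	// socket lock already released
 *	...
 *	mutex_unlock(&call->user_mutex);
 */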

/*
 * Retry a call to a new address.  It is expected that the Tx queue of the call
 * will contain data previously packaged for an old call.
 */
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
			    struct rxrpc_call *call,
			    struct rxrpc_conn_parameters *cp,
			    struct sockaddr_rxrpc *srx,
			    gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);

	_leave(" = 0");
	return 0;

error:
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				  RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	_leave(" = %d", ret);
	return ret;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}
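
/* For illustration, not part of the original file: an rxrpc connection
 * carries four call channels and the low bits of the connection ID select
 * one, so for a header with cid 0x4f2a5e07:
 *
 *	chan = 0x4f2a5e07 & RXRPC_CHANNELMASK;	// mask is 3, so channel 3
 */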

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
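
/* Illustrative contrast, a sketch rather than code from this file:
 * rxrpc_queue_call() conditionally takes its own ref (failing if the count
 * has already hit zero), so it is safe where the caller only has a borrowed
 * pointer; __rxrpc_queue_call() donates a ref the caller already owns:
 *
 *	if (rxrpc_queue_call(call))	// took a new ref for the work item
 *		...;
 *
 *	rxrpc_get_call(call, rxrpc_call_got);
 *	__rxrpc_queue_call(call);	// the work item now owns that ref
 */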

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}

/*
 * Prepare a kernel service call for retry.
 */
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int i;
	u8 last = 0;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
	ASSERT(list_empty(&call->recvmsg_link));

	del_timer_sync(&call->timer);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);

	if (call->conn)
		rxrpc_disconnect_call(call);

	if (rxrpc_is_service_call(call) ||
	    !call->tx_phase ||
	    call->tx_hard_ack != 0 ||
	    call->rx_hard_ack != 0 ||
	    call->rx_top != 0)
		return -EINVAL;

	call->state = RXRPC_CALL_UNINITIALISED;
	call->completion = RXRPC_CALL_SUCCEEDED;
	call->call_id = 0;
	call->cid = 0;
	call->cong_cwnd = 0;
	call->cong_extra = 0;
	call->cong_ssthresh = 0;
	call->cong_mode = 0;
	call->cong_dup_acks = 0;
	call->cong_cumul_acks = 0;
	call->acks_lowest_nak = 0;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		last |= call->rxtx_annotations[i];
		call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
		call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
	}

	_leave(" = 0");
	return 0;
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			rxnet = rxrpc_net(sock_net(&call->socket->sk));
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}
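
/* Lifecycle summary, illustrative: a call is born with one ref from
 * rxrpc_alloc_call(); rxrpc_get_call(), rxrpc_queue_call() and the user-ID
 * rbtree each add refs, and every rxrpc_put_call() drops one.  The count may
 * only reach zero once the call is complete:
 *
 *	rxrpc_get_call(call, rxrpc_call_got);	// usage: n -> n + 1
 *	...
 *	rxrpc_put_call(call, rxrpc_call_put);	// usage: n + 1 -> n
 */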

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
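
/* Why call_rcu(), an inference from the code above rather than original
 * text: the call may still be reachable through RCU-protected pointers such
 * as conn->channels[chan].call, which were published with
 * rcu_assign_pointer().  Deferring the free to rxrpc_rcu_destroy_call() lets
 * lockless readers finish with the structure before the memory goes back to
 * the slab.
 */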

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxnet->calls))
		return;

	write_lock(&rxnet->call_lock);

	while (!list_empty(&rxnet->calls)) {
		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		write_unlock(&rxnet->call_lock);
		cond_resched();
		write_lock(&rxnet->call_lock);
	}

	write_unlock(&rxnet->call_lock);
}