/* net/rxrpc/conn_object.c */
  1. /* RxRPC virtual connection handler, common bits.
  2. *
  3. * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <linux/net.h>
  15. #include <linux/skbuff.h>
  16. #include "ar-internal.h"
  17. /*
  18. * Time till a connection expires after last use (in seconds).
  19. */
  20. unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
  21. unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
  22. static void rxrpc_destroy_connection(struct rcu_head *);
  23. static void rxrpc_connection_timer(struct timer_list *timer)
  24. {
  25. struct rxrpc_connection *conn =
  26. container_of(timer, struct rxrpc_connection, timer);
  27. rxrpc_queue_conn(conn);
  28. }
  29. /*
  30. * allocate a new connection
  31. */
  32. struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  33. {
  34. struct rxrpc_connection *conn;
  35. _enter("");
  36. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  37. if (conn) {
  38. INIT_LIST_HEAD(&conn->cache_link);
  39. spin_lock_init(&conn->channel_lock);
  40. INIT_LIST_HEAD(&conn->waiting_calls);
  41. timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
  42. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  43. INIT_LIST_HEAD(&conn->proc_link);
  44. INIT_LIST_HEAD(&conn->link);
  45. skb_queue_head_init(&conn->rx_queue);
  46. conn->security = &rxrpc_no_security;
  47. spin_lock_init(&conn->state_lock);
  48. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  49. conn->size_align = 4;
  50. conn->idle_timestamp = jiffies;
  51. }
  52. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  53. return conn;
  54. }
  55. /*
  56. * Look up a connection in the cache by protocol parameters.
  57. *
  58. * If successful, a pointer to the connection is returned, but no ref is taken.
  59. * NULL is returned if there is no match.
  60. *
  61. * When searching for a service call, if we find a peer but no connection, we
  62. * return that through *_peer in case we need to create a new service call.
  63. *
  64. * The caller must be holding the RCU read lock.
  65. */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb,
						   struct rxrpc_peer **_peer)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* If we can't recover the packet's source address, nothing can match. */
	if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	if (rxrpc_to_server(sp)) {
		/* We need to look up service connections by the full protocol
		 * parameter set. We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		/* Pass the peer back even if no conn is found, so the caller
		 * can create a new service call against it.
		 */
		*_peer = peer;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		/* usage == 0 means the conn is being torn down; treat as absent. */
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		/* The ID matched, but the conn must also belong to the same
		 * epoch and local endpoint as the packet claims.
		 */
		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		/* Verify that the packet's source address matches the conn's
		 * peer, so a reply from a different host can't hijack the ID.
		 */
		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			/* Family was validated against local->srx above, so
			 * anything else here indicates internal corruption.
			 */
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
  143. /*
  144. * Disconnect a call and clear any channel it occupies when that call
  145. * terminates. The caller must hold the channel_lock and must release the
  146. * call's ref on the connection.
  147. */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The channel index is the low bits of the call's CID. */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	/* Only clear the channel if it is still occupied by this call. */
	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			/* Record the final ACK state for retransmission. */
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			/* Anything else is reported as a user abort. */
			chan->last_abort = RX_USER_ABORT;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(). */
		/* The barrier orders the last_* stores above before the
		 * last_call update below, so a retransmitter that sees the
		 * new last_call also sees a consistent saved result.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
  181. /*
  182. * Disconnect a call and clear any channel it occupies when that call
  183. * terminates.
  184. */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	/* Preserve the call's final congestion window on the peer so that
	 * future calls to the same peer can start from it.
	 */
	call->peer->cong_cwnd = call->cong_cwnd;

	/* Remove the call from the peer's error-distribution list. */
	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	/* Client calls have their own channel/ref management; that path is
	 * expected to drop the call's ref on the connection itself.
	 */
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	/* Restart the idle clock for the service-conn reaper. */
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}
  201. /*
  202. * Kill off a connection.
  203. */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	/* All four channels must be empty and the conn must already be off
	 * the client cache list before it can be killed.
	 */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	/* Unpublish from the /proc listing. */
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue. Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU. The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}
  226. /*
  227. * Queue a connection's work processor, getting a ref to pass to the work
  228. * queue.
  229. */
  230. bool rxrpc_queue_conn(struct rxrpc_connection *conn)
  231. {
  232. const void *here = __builtin_return_address(0);
  233. int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
  234. if (n == 0)
  235. return false;
  236. if (rxrpc_queue_work(&conn->processor))
  237. trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
  238. else
  239. rxrpc_put_connection(conn);
  240. return true;
  241. }
  242. /*
  243. * Note the re-emergence of a connection.
  244. */
  245. void rxrpc_see_connection(struct rxrpc_connection *conn)
  246. {
  247. const void *here = __builtin_return_address(0);
  248. if (conn) {
  249. int n = atomic_read(&conn->usage);
  250. trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
  251. }
  252. }
  253. /*
  254. * Get a ref on a connection.
  255. */
  256. void rxrpc_get_connection(struct rxrpc_connection *conn)
  257. {
  258. const void *here = __builtin_return_address(0);
  259. int n = atomic_inc_return(&conn->usage);
  260. trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
  261. }
  262. /*
  263. * Try to get a ref on a connection.
  264. */
  265. struct rxrpc_connection *
  266. rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
  267. {
  268. const void *here = __builtin_return_address(0);
  269. if (conn) {
  270. int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
  271. if (n > 0)
  272. trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
  273. else
  274. conn = NULL;
  275. }
  276. return conn;
  277. }
  278. /*
  279. * Set the service connection reap timer.
  280. */
  281. static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
  282. unsigned long reap_at)
  283. {
  284. if (rxnet->live)
  285. timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
  286. }
  287. /*
  288. * Release a service connection
  289. */
  290. void rxrpc_put_service_conn(struct rxrpc_connection *conn)
  291. {
  292. const void *here = __builtin_return_address(0);
  293. int n;
  294. n = atomic_dec_return(&conn->usage);
  295. trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
  296. ASSERTCMP(n, >=, 0);
  297. if (n == 1)
  298. rxrpc_set_service_reap_timer(conn->params.local->rxnet,
  299. jiffies + rxrpc_connection_expiry);
  300. }
  301. /*
  302. * destroy a virtual connection
  303. */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	/* The last ref must have been dropped before the RCU callback ran. */
	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	del_timer_sync(&conn->timer);
	/* Drain again: packets may have been queued after the drain in
	 * rxrpc_kill_connection() but before the grace period ended.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);

	/* Wake anyone waiting in rxrpc_destroy_all_connections() for the
	 * namespace conn count to reach zero.
	 */
	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
		wake_up_var(&conn->params.local->rxnet->nr_conns);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
  323. /*
  324. * reap dead service connections
  325. */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	/* Track the earliest future expiry so the timer can be re-armed. */
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		/* usage > 1 means the conn is in use beyond the list's own
		 * ref; it cannot be idle.
		 */
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		/* When the namespace is dying (!live), expiry is skipped and
		 * everything idle is reaped immediately.
		 */
		if (rxnet->live) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			/* Closed services get a much shorter grace period. */
			if (conn->params.local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { u=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->usage),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		/* cmpxchg loses the race against a concurrent getter, in
		 * which case the conn is back in use and must be left alone.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;
		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);

		if (rxrpc_conn_is_client(conn))
			BUG();	/* only service conns live on this list */
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	/* Kill the harvested connections outside the lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}
  384. /*
  385. * preemptively destroy all the service connection records rather than
  386. * waiting for them to time out
  387. */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* Drop the namespace's initial count on nr_conns — NOTE(review):
	 * presumably balances a count set at net-namespace init; confirm
	 * against rxrpc_net_init().
	 */
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	/* Stop the timer and force one final, expiry-free reaper pass
	 * (rxnet->live is expected to be false by now).
	 */
	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the list at this point has leaked a ref. */
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}