conn_object.c

/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
  27. /*
  28. * allocate a new connection
  29. */
  30. struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  31. {
  32. struct rxrpc_connection *conn;
  33. _enter("");
  34. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  35. if (conn) {
  36. spin_lock_init(&conn->channel_lock);
  37. init_waitqueue_head(&conn->channel_wq);
  38. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  39. INIT_LIST_HEAD(&conn->link);
  40. skb_queue_head_init(&conn->rx_queue);
  41. conn->security = &rxrpc_no_security;
  42. spin_lock_init(&conn->state_lock);
  43. /* We maintain an extra ref on the connection whilst it is
  44. * on the rxrpc_connections list.
  45. */
  46. atomic_set(&conn->usage, 2);
  47. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  48. atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
  49. conn->size_align = 4;
  50. conn->header_size = sizeof(struct rxrpc_wire_header);
  51. }
  52. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  53. return conn;
  54. }
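
/* Usage-count lifecycle implied above (illustrative summary, not from the
 * original file):
 *
 *	2	freshly allocated: one ref for the caller plus the extra ref
 *		held for the rxrpc_connections list
 *	1	idle: only the list ref remains; rxrpc_put_connection() drops
 *		the count to here and kicks the reaper
 *	0	dead: the reaper's cmpxchg() has claimed the record, which
 *		now awaits RCU destruction
 */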

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
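
/* Illustrative caller sketch (an assumption, not code from this file): the
 * lookup takes no ref, so a caller needing the connection beyond the RCU
 * read-side critical section would pin it first, e.g.:
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_connection_rcu(local, skb);
 *	if (conn && !atomic_inc_not_zero(&conn->usage))
 *		conn = NULL;
 *	rcu_read_unlock();
 */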

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[call->channel];

	_enter("%d,%d", conn->debug_id, call->channel);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if
		 * necessary through the channel, whilst disposing of the
		 * actual call record.
		 */
		chan->last_result = call->local_abort;
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}

	_leave("");
}
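
/* Note: the smp_wmb() above orders the store of chan->last_result before the
 * stores that publish chan->last_call, so a lockless reader that sees the
 * new last_call value should also see the saved result; the matching read
 * barrier is assumed to sit on the reader's side.
 */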

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	rxrpc_put_connection(conn);
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 1);

	conn->put_time = ktime_get_seconds();
	if (atomic_dec_return(&conn->usage) == 1) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
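
/* Note: the reaper is queued immediately here, but it only destroys
 * connections whose put_time is older than rxrpc_connection_expiry; a
 * younger zombie merely causes the reaper to reschedule itself for the
 * earliest pending expiry.
 */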

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, put_time, now;
	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	reap_older_than = now - rxrpc_connection_expiry;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;

		put_time = READ_ONCE(conn->put_time);
		if (time_after(put_time, reap_older_than)) {
			if (time_before(put_time, earliest))
				earliest = put_time;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on
		 * the list; we reduce that to 0 to make the object
		 * unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			rxrpc_unpublish_client_conn(conn);
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long)(earliest - now));
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		skb_queue_purge(&conn->rx_queue);
		call_rcu(&conn->rcu, rxrpc_destroy_connection);
	}

	_leave("");
}
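
/* Note: the cmpxchg() in the reaper pairs with the usage checks in
 * rxrpc_find_connection_rcu(): a record whose count has been driven to 0 can
 * still be observed under RCU until call_rcu() runs, so lookups treat a zero
 * usage count as "not found".
 */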

/*
 * preemptively destroy all the connection records rather than waiting for
 * them to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}