conn_client.c

/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include "ar-internal.h"

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree. The
 * epoch is changed if this wraps.
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated. We will also need to retire connections from an old epoch.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	u32 epoch;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	epoch = rxrpc_epoch;

	/* We could use idr_alloc_cyclic() here, but we really need to know
	 * when the thing wraps so that we can advance the epoch.
	 */
	if (rxrpc_client_conn_ids.cur == 0)
		rxrpc_client_conn_ids.cur = 1;
	id = idr_alloc(&rxrpc_client_conn_ids, conn,
		       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
	if (id < 0) {
		if (id != -ENOSPC)
			goto error;
		id = idr_alloc(&rxrpc_client_conn_ids, conn,
			       1, 0x40000000, GFP_NOWAIT);
		if (id < 0)
			goto error;
		epoch++;
		rxrpc_epoch = epoch;
	}
	rxrpc_client_conn_ids.cur = id + 1;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
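
	/* The IDR index forms the upper bits of the connection ID; the low
	 * RXRPC_CIDSHIFT bits are left clear so that a channel number can be
	 * OR'd in when a call is attached (see rxrpc_connect_call() below).
	 * The 0x40000000 ceiling used above presumably keeps the shifted
	 * value within the 32-bit CID field.
	 */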
	conn->proto.epoch = epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x:%x]", epoch, conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection. The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params = *cp;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
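/*
 * In outline: look for an existing shareable connection with matching
 * parameters; failing that, allocate a candidate connection, redo the search
 * under the lock and either publish the candidate or discard it in favour of
 * whichever connection beat us to the tree.  An exclusive connection bypasses
 * the lookup tree entirely and carries its single call on channel 0.
 */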
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int chan;

	DECLARE_WAITQUEUE(myself, current);

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		return -ENOMEM;

	if (!cp->exclusive) {
		/* Search for an existing client connection unless this is
		 * going to be a connection that's used exclusively for a
		 * single call.
		 */
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);
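
			/* Order the tree on the wanted parameters, comparing
			 * the peer pointer, then the key pointer, then the
			 * security level; the ?: chain yields the first
			 * non-zero difference.
			 */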
#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
			if (diff < 0)
				p = p->rb_left;
			else if (diff > 0)
				p = p->rb_right;
			else
				goto found_extant_conn;
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* We didn't find a connection or we want an exclusive one. */
	_debug("get new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return PTR_ERR(candidate);
	}

	if (cp->exclusive) {
		/* Assign the call on an exclusive connection to channel 0 and
		 * don't add the connection to the endpoint's shareable conn
		 * lookup tree.
		 */
		_debug("exclusive chan 0");
		conn = candidate;
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
		spin_lock(&conn->channel_lock);
		chan = 0;
		goto found_channel;
	}

	/* We need to redo the search before attempting to add a new connection
	 * lest we race with someone else adding a conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* The second search also failed; simply add the new connection with
	 * the new call in channel 0. Note that we need to take the channel
	 * lock before dropping the client conn lock.
	 */
	_debug("new conn");
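	/* Setting RXRPC_CONN_IN_CLIENT_CONNS records that the connection is
	 * published in the endpoint's lookup tree, so that
	 * rxrpc_unpublish_client_conn() knows to erase it later.
	 */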
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

attached:
	conn = candidate;
	candidate = NULL;
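
	/* Channel 0 is taken immediately by this call, leaving
	 * RXRPC_MAXCALLS - 1 channels available to other callers.
	 */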
	atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
	spin_lock(&conn->channel_lock);
	spin_unlock(&local->client_conns_lock);
	chan = 0;

found_channel:
	_debug("found chan");
	call->conn = conn;
	call->channel = chan;
	call->epoch = conn->proto.epoch;
	call->cid = conn->proto.cid | chan;
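	/* Each channel keeps its own monotonically increasing call counter;
	 * the new call takes the next call ID on the channel it occupies.
	 */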
	call->call_id = ++conn->channels[chan].call_counter;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	_net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

	spin_unlock(&conn->channel_lock);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return 0;

	/* We found a potentially suitable connection already in existence. If
	 * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
	 * reaper), discard any candidate we may have allocated, and try to get
	 * a channel on this one, otherwise we have to replace it.
	 */
found_extant_conn:
	_debug("found conn");
	if (!rxrpc_get_connection_maybe(conn)) {
		set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
		rb_replace_node(&conn->client_node,
				&candidate->client_node,
				&local->client_conns);
		clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
		goto attached;
	}

	spin_unlock(&local->client_conns_lock);

	rxrpc_put_connection(candidate);
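
	/* Reserve a channel slot: atomically decrement avail_chans unless it
	 * is already zero.  If no slot is free, either give up with -EAGAIN
	 * for a non-blocking allocation or sleep on the connection's channel
	 * wait queue until another call releases a channel.
	 */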
	if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
		if (!gfpflags_allow_blocking(gfp)) {
			rxrpc_put_connection(conn);
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}

		add_wait_queue(&conn->channel_wq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (atomic_add_unless(&conn->avail_chans, -1, 0))
				break;
			if (signal_pending(current))
				goto interrupted;
			schedule();
		}
		remove_wait_queue(&conn->channel_wq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* The connection allegedly now has a free channel and we can now
	 * attach the call to it.
	 */
	spin_lock(&conn->channel_lock);

	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan].call)
			goto found_channel;
	BUG();

interrupted:
	remove_wait_queue(&conn->channel_wq, &myself);
	__set_current_state(TASK_RUNNING);
	rxrpc_put_connection(conn);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * Remove a client connection from the local endpoint's tree, thereby removing
 * it as a target for reuse for new client calls.
 */
void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;

	spin_lock(&local->client_conns_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
		rb_erase(&conn->client_node, &local->client_conns);
	spin_unlock(&local->client_conns_lock);

	rxrpc_put_client_connection_id(conn);
}