/* net/rxrpc/conn_object.c */
  1. /* RxRPC virtual connection handler
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <linux/net.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/crypto.h>
  17. #include <net/sock.h>
  18. #include <net/af_rxrpc.h>
  19. #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

/* List of all extant connections; additions and removals are guarded by
 * rxrpc_connection_lock. */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);

/* Delayed work item that reaps idle, unreferenced connections. */
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
  28. /*
  29. * allocate a new connection
  30. */
  31. static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  32. {
  33. struct rxrpc_connection *conn;
  34. _enter("");
  35. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  36. if (conn) {
  37. spin_lock_init(&conn->channel_lock);
  38. init_waitqueue_head(&conn->channel_wq);
  39. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  40. INIT_LIST_HEAD(&conn->link);
  41. conn->calls = RB_ROOT;
  42. skb_queue_head_init(&conn->rx_queue);
  43. conn->security = &rxrpc_no_security;
  44. rwlock_init(&conn->lock);
  45. spin_lock_init(&conn->state_lock);
  46. atomic_set(&conn->usage, 1);
  47. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  48. atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
  49. conn->size_align = 4;
  50. conn->header_size = sizeof(struct rxrpc_wire_header);
  51. }
  52. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  53. return conn;
  54. }
  55. /*
  56. * add a call to a connection's call-by-ID tree
  57. */
  58. static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
  59. struct rxrpc_call *call)
  60. {
  61. struct rxrpc_call *xcall;
  62. struct rb_node *parent, **p;
  63. u32 call_id;
  64. write_lock_bh(&conn->lock);
  65. call_id = call->call_id;
  66. p = &conn->calls.rb_node;
  67. parent = NULL;
  68. while (*p) {
  69. parent = *p;
  70. xcall = rb_entry(parent, struct rxrpc_call, conn_node);
  71. if (call_id < xcall->call_id)
  72. p = &(*p)->rb_left;
  73. else if (call_id > xcall->call_id)
  74. p = &(*p)->rb_right;
  75. else
  76. BUG();
  77. }
  78. rb_link_node(&call->conn_node, parent, p);
  79. rb_insert_color(&call->conn_node, &conn->calls);
  80. write_unlock_bh(&conn->lock);
  81. }
/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 *
 * On success, the caller's ref on cp->peer is stolen (cp->peer is zeroed)
 * and extra refs are taken on the local endpoint and the security key.
 * Returns the new connection or an ERR_PTR() - never NULL.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* Copy the whole parameter block; the refs it carries (peer, key) are
	 * not pinned until the success path below. */
	conn->params = *cp;
	conn->proto.local = cp->local;
	conn->proto.epoch = rxrpc_epoch;
	conn->proto.cid = 0;	/* assigned by rxrpc_get_client_connection_id() */
	conn->proto.in_clientflag = 0;
	conn->proto.family = cp->peer->srx.transport.family;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;

	switch (conn->proto.family) {
	case AF_INET:
		/* NOTE(review): only IPv4 transport addresses are copied here;
		 * other families leave the proto address fields zeroed. */
		conn->proto.addr_size = sizeof(conn->proto.ipv4_addr);
		conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr;
		conn->proto.port = cp->peer->srx.transport.sin.sin_port;
		break;
	}

	/* Allocate a client connection ID and publish it in the client CID
	 * tree for incoming-packet lookup. */
	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	/* Make the connection visible to the reaper. */
	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

	/* Unwind in reverse order of acquisition. */
error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
  139. /*
  140. * find a connection for a call
  141. * - called in process context with IRQs enabled
  142. */
  143. int rxrpc_connect_call(struct rxrpc_call *call,
  144. struct rxrpc_conn_parameters *cp,
  145. struct sockaddr_rxrpc *srx,
  146. gfp_t gfp)
  147. {
  148. struct rxrpc_connection *conn, *candidate = NULL;
  149. struct rxrpc_local *local = cp->local;
  150. struct rb_node *p, **pp, *parent;
  151. long diff;
  152. int chan;
  153. DECLARE_WAITQUEUE(myself, current);
  154. _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
  155. cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
  156. if (!cp->peer)
  157. return -ENOMEM;
  158. if (!cp->exclusive) {
  159. /* Search for a existing client connection unless this is going
  160. * to be a connection that's used exclusively for a single call.
  161. */
  162. _debug("search 1");
  163. spin_lock(&local->client_conns_lock);
  164. p = local->client_conns.rb_node;
  165. while (p) {
  166. conn = rb_entry(p, struct rxrpc_connection, client_node);
  167. #define cmp(X) ((long)conn->params.X - (long)cp->X)
  168. diff = (cmp(peer) ?:
  169. cmp(key) ?:
  170. cmp(security_level));
  171. if (diff < 0)
  172. p = p->rb_left;
  173. else if (diff > 0)
  174. p = p->rb_right;
  175. else
  176. goto found_extant_conn;
  177. }
  178. spin_unlock(&local->client_conns_lock);
  179. }
  180. /* We didn't find a connection or we want an exclusive one. */
  181. _debug("get new conn");
  182. candidate = rxrpc_alloc_client_connection(cp, gfp);
  183. if (!candidate) {
  184. _leave(" = -ENOMEM");
  185. return -ENOMEM;
  186. }
  187. if (cp->exclusive) {
  188. /* Assign the call on an exclusive connection to channel 0 and
  189. * don't add the connection to the endpoint's shareable conn
  190. * lookup tree.
  191. */
  192. _debug("exclusive chan 0");
  193. conn = candidate;
  194. atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
  195. spin_lock(&conn->channel_lock);
  196. chan = 0;
  197. goto found_channel;
  198. }
  199. /* We need to redo the search before attempting to add a new connection
  200. * lest we race with someone else adding a conflicting instance.
  201. */
  202. _debug("search 2");
  203. spin_lock(&local->client_conns_lock);
  204. pp = &local->client_conns.rb_node;
  205. parent = NULL;
  206. while (*pp) {
  207. parent = *pp;
  208. conn = rb_entry(parent, struct rxrpc_connection, client_node);
  209. diff = (cmp(peer) ?:
  210. cmp(key) ?:
  211. cmp(security_level));
  212. if (diff < 0)
  213. pp = &(*pp)->rb_left;
  214. else if (diff > 0)
  215. pp = &(*pp)->rb_right;
  216. else
  217. goto found_extant_conn;
  218. }
  219. /* The second search also failed; simply add the new connection with
  220. * the new call in channel 0. Note that we need to take the channel
  221. * lock before dropping the client conn lock.
  222. */
  223. _debug("new conn");
  224. conn = candidate;
  225. candidate = NULL;
  226. rb_link_node(&conn->client_node, parent, pp);
  227. rb_insert_color(&conn->client_node, &local->client_conns);
  228. atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
  229. spin_lock(&conn->channel_lock);
  230. spin_unlock(&local->client_conns_lock);
  231. chan = 0;
  232. found_channel:
  233. _debug("found chan");
  234. call->conn = conn;
  235. call->channel = chan;
  236. call->epoch = conn->proto.epoch;
  237. call->cid = conn->proto.cid | chan;
  238. call->call_id = ++conn->call_counter;
  239. rcu_assign_pointer(conn->channels[chan], call);
  240. _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
  241. rxrpc_add_call_ID_to_conn(conn, call);
  242. spin_unlock(&conn->channel_lock);
  243. rxrpc_put_peer(cp->peer);
  244. cp->peer = NULL;
  245. _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
  246. return 0;
  247. /* We found a suitable connection already in existence. Discard any
  248. * candidate we may have allocated, and try to get a channel on this
  249. * one.
  250. */
  251. found_extant_conn:
  252. _debug("found conn");
  253. rxrpc_get_connection(conn);
  254. spin_unlock(&local->client_conns_lock);
  255. rxrpc_put_connection(candidate);
  256. if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
  257. if (!gfpflags_allow_blocking(gfp)) {
  258. rxrpc_put_connection(conn);
  259. _leave(" = -EAGAIN");
  260. return -EAGAIN;
  261. }
  262. add_wait_queue(&conn->channel_wq, &myself);
  263. for (;;) {
  264. set_current_state(TASK_INTERRUPTIBLE);
  265. if (atomic_add_unless(&conn->avail_chans, -1, 0))
  266. break;
  267. if (signal_pending(current))
  268. goto interrupted;
  269. schedule();
  270. }
  271. remove_wait_queue(&conn->channel_wq, &myself);
  272. __set_current_state(TASK_RUNNING);
  273. }
  274. /* The connection allegedly now has a free channel and we can now
  275. * attach the call to it.
  276. */
  277. spin_lock(&conn->channel_lock);
  278. for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
  279. if (!conn->channels[chan])
  280. goto found_channel;
  281. BUG();
  282. interrupted:
  283. remove_wait_queue(&conn->channel_wq, &myself);
  284. __set_current_state(TASK_RUNNING);
  285. rxrpc_put_connection(conn);
  286. rxrpc_put_peer(cp->peer);
  287. cp->peer = NULL;
  288. _leave(" = -ERESTARTSYS");
  289. return -ERESTARTSYS;
  290. }
/*
 * get a record of an incoming connection
 *
 * Looks up the service connection matching the packet's { epoch, cid } in
 * the peer's tree; if absent, allocates a candidate and redoes the search
 * under the write lock before inserting it.  Returns the connection with a
 * ref held, or an ERR_PTR() on OOM / security-index mismatch.
 */
struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
						   struct rxrpc_peer *peer,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p, **pp;
	const char *new = "old";
	u32 epoch, cid;

	_enter("");

	ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);

	epoch = sp->hdr.epoch;
	cid = sp->hdr.cid & RXRPC_CIDMASK;	/* mask off the channel bits */

	/* search the connection list first */
	read_lock_bh(&peer->conn_lock);

	p = peer->service_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		_debug("maybe %x", conn->proto.cid);

		/* Tree is ordered by epoch first, then cid. */
		if (epoch < conn->proto.epoch)
			p = p->rb_left;
		else if (epoch > conn->proto.epoch)
			p = p->rb_right;
		else if (cid < conn->proto.cid)
			p = p->rb_left;
		else if (cid > conn->proto.cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&peer->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(GFP_NOIO);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* Fill in the candidate from the packet header.  No refs are taken
	 * yet; they are only pinned if the insert below succeeds. */
	candidate->proto.local = local;
	candidate->proto.epoch = sp->hdr.epoch;
	candidate->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
	candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->params.local = local;
	candidate->params.peer = peer;
	candidate->params.service_id = sp->hdr.serviceId;
	candidate->security_ix = sp->hdr.securityIndex;
	candidate->out_clientflag = 0;
	candidate->state = RXRPC_CONN_SERVICE;
	/* A non-zero service ID means security must be negotiated before the
	 * connection is considered fully operational. */
	if (candidate->params.service_id)
		candidate->state = RXRPC_CONN_SERVICE_UNSECURED;

	write_lock_bh(&peer->conn_lock);

	/* Redo the search under the write lock in case someone beat us to
	 * inserting the same connection. */
	pp = &peer->service_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		if (epoch < conn->proto.epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->proto.epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->proto.cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->proto.cid)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->service_node, p, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
	/* The tree now holds refs on the peer and local endpoint. */
	rxrpc_get_peer(peer);
	rxrpc_get_local(local);

	write_unlock_bh(&peer->conn_lock);

	/* Make the connection visible to the reaper. */
	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (sp->hdr.securityIndex != conn->security_ix) {
		read_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (sp->hdr.securityIndex != conn->security_ix) {
		write_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	write_unlock_bh(&peer->conn_lock);
	kfree(candidate);	/* candidate took no refs, so plain free suffices */
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
  401. /*
  402. * find a connection based on transport and RxRPC connection ID for an incoming
  403. * packet
  404. */
  405. struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
  406. struct rxrpc_peer *peer,
  407. struct sk_buff *skb)
  408. {
  409. struct rxrpc_connection *conn;
  410. struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
  411. struct rb_node *p;
  412. u32 epoch, cid;
  413. _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);
  414. read_lock_bh(&peer->conn_lock);
  415. cid = sp->hdr.cid & RXRPC_CIDMASK;
  416. epoch = sp->hdr.epoch;
  417. if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
  418. p = peer->service_conns.rb_node;
  419. while (p) {
  420. conn = rb_entry(p, struct rxrpc_connection, service_node);
  421. _debug("maybe %x", conn->proto.cid);
  422. if (epoch < conn->proto.epoch)
  423. p = p->rb_left;
  424. else if (epoch > conn->proto.epoch)
  425. p = p->rb_right;
  426. else if (cid < conn->proto.cid)
  427. p = p->rb_left;
  428. else if (cid > conn->proto.cid)
  429. p = p->rb_right;
  430. else
  431. goto found;
  432. }
  433. } else {
  434. conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
  435. if (conn &&
  436. conn->proto.epoch == epoch &&
  437. conn->params.peer == peer)
  438. goto found;
  439. }
  440. read_unlock_bh(&peer->conn_lock);
  441. _leave(" = NULL");
  442. return NULL;
  443. found:
  444. rxrpc_get_connection(conn);
  445. read_unlock_bh(&peer->conn_lock);
  446. _leave(" = %p", conn);
  447. return conn;
  448. }
  449. /*
  450. * Disconnect a call and clear any channel it occupies when that call
  451. * terminates.
  452. */
  453. void rxrpc_disconnect_call(struct rxrpc_call *call)
  454. {
  455. struct rxrpc_connection *conn = call->conn;
  456. unsigned chan = call->channel;
  457. _enter("%d,%d", conn->debug_id, call->channel);
  458. spin_lock(&conn->channel_lock);
  459. if (conn->channels[chan] == call) {
  460. rcu_assign_pointer(conn->channels[chan], NULL);
  461. atomic_inc(&conn->avail_chans);
  462. wake_up(&conn->channel_wq);
  463. }
  464. spin_unlock(&conn->channel_lock);
  465. call->conn = NULL;
  466. rxrpc_put_connection(conn);
  467. _leave("");
  468. }
  469. /*
  470. * release a virtual connection
  471. */
  472. void rxrpc_put_connection(struct rxrpc_connection *conn)
  473. {
  474. if (!conn)
  475. return;
  476. _enter("%p{u=%d,d=%d}",
  477. conn, atomic_read(&conn->usage), conn->debug_id);
  478. ASSERTCMP(atomic_read(&conn->usage), >, 0);
  479. conn->put_time = ktime_get_seconds();
  480. if (atomic_dec_and_test(&conn->usage)) {
  481. _debug("zombie");
  482. rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
  483. }
  484. _leave("");
  485. }
  486. /*
  487. * destroy a virtual connection
  488. */
  489. static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
  490. {
  491. _enter("%p{%d}", conn, atomic_read(&conn->usage));
  492. ASSERTCMP(atomic_read(&conn->usage), ==, 0);
  493. _net("DESTROY CONN %d", conn->debug_id);
  494. ASSERT(RB_EMPTY_ROOT(&conn->calls));
  495. rxrpc_purge_queue(&conn->rx_queue);
  496. conn->security->clear(conn);
  497. key_put(conn->params.key);
  498. key_put(conn->server_key);
  499. rxrpc_put_peer(conn->params.peer);
  500. rxrpc_put_local(conn->params.local);
  501. kfree(conn);
  502. _leave("");
  503. }
/*
 * reap dead connections
 *
 * Runs from the global delayed work item.  Moves expired, unreferenced
 * connections onto a local graveyard list (so they can be destroyed outside
 * the locks) and reschedules itself for the earliest future expiry found.
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_peer *peer;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		/* Still in use - cheap skip without taking the tree lock. */
		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		if (rxrpc_conn_is_client(conn)) {
			struct rxrpc_local *local = conn->params.local;
			spin_lock(&local->client_conns_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			/* Recheck the usage count under the lock in case the
			 * connection was resurrected in the meantime. */
			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				/* Expired: unpublish and queue for destroy. */
				list_move_tail(&conn->link, &graveyard);
				rxrpc_put_client_connection_id(conn);
				rb_erase(&conn->client_node,
					 &local->client_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			spin_unlock(&local->client_conns_lock);
		} else {
			/* Service connections live in the peer's tree under
			 * peer->conn_lock instead. */
			peer = conn->params.peer;
			write_lock_bh(&peer->conn_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rb_erase(&conn->service_node,
					 &peer->service_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			write_unlock_bh(&peer->conn_lock);
		}
	}
	write_unlock(&rxrpc_connection_lock);

	/* Re-arm the timer for the next connection due to expire. */
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
  571. /*
  572. * preemptively destroy all the connection records rather than waiting for them
  573. * to time out
  574. */
  575. void __exit rxrpc_destroy_all_connections(void)
  576. {
  577. _enter("");
  578. rxrpc_connection_expiry = 0;
  579. cancel_delayed_work(&rxrpc_connection_reap);
  580. rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
  581. _leave("");
  582. }