/* af_rxrpc.c */
  1. /* AF_RXRPC implementation
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/kernel.h>
  14. #include <linux/net.h>
  15. #include <linux/slab.h>
  16. #include <linux/skbuff.h>
  17. #include <linux/random.h>
  18. #include <linux/poll.h>
  19. #include <linux/proc_fs.h>
  20. #include <linux/key-type.h>
  21. #include <net/net_namespace.h>
  22. #include <net/sock.h>
  23. #include <net/af_rxrpc.h>
  24. #define CREATE_TRACE_POINTS
  25. #include "ar-internal.h"
  26. MODULE_DESCRIPTION("RxRPC network protocol");
  27. MODULE_AUTHOR("Red Hat, Inc.");
  28. MODULE_LICENSE("GPL");
  29. MODULE_ALIAS_NETPROTO(PF_RXRPC);
  30. unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
  31. module_param_named(debug, rxrpc_debug, uint, 0644);
  32. MODULE_PARM_DESC(debug, "RxRPC debugging mask");
  33. static struct proto rxrpc_proto;
  34. static const struct proto_ops rxrpc_rpc_ops;
  35. /* current debugging ID */
  36. atomic_t rxrpc_debug_id;
  37. EXPORT_SYMBOL(rxrpc_debug_id);
  38. /* count of skbs currently in use */
  39. atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
  40. struct workqueue_struct *rxrpc_workqueue;
  41. static void rxrpc_sock_destructor(struct sock *);
  42. /*
  43. * see if an RxRPC socket is currently writable
  44. */
  45. static inline int rxrpc_writable(struct sock *sk)
  46. {
  47. return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
  48. }
/*
 * wait for write bufferage to become available
 *
 * sk_write_space callback: if the socket has become writable again, wake
 * any sleepers on the socket's wait queue and send a SIGIO-style async
 * space notification.
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		/* sk_wq is RCU-protected; only issue a wakeup if someone is
		 * actually sleeping on it.
		 */
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/*
 * validate an RxRPC address
 *
 * Check that @srx (@len bytes of it supplied) is a well-formed
 * sockaddr_rxrpc for socket @rx: AF_RXRPC family, SOCK_DGRAM transport
 * and a supported transport family with a sufficient transport_len.
 * Any unused tail beyond the transport address is zeroed so that whole
 * structures can later be compared with memcmp().
 *
 * Returns 0 or a negative errno.
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	/* An AF_INET transport address is only acceptable on an AF_INET or
	 * an AF_INET6 socket; other family mismatches reach the switch below.
	 */
	if (srx->transport.family != rx->family &&
	    srx->transport.family == AF_INET && rx->family != AF_INET6)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		/* The in-struct padding after sockaddr_in is the tail */
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	/* Zero the tail so addresses compare equal with memcmp() */
	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}
/*
 * bind a local address to an RxRPC socket
 *
 * First bind (RXRPC_UNBOUND) attaches a local transport endpoint and, if a
 * service ID was given, registers the socket as that endpoint's service.
 * A second bind on an already service-bound socket may only add a second
 * service ID to the same address.
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id = srx->srx_service;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		/* Find or create the local endpoint for this address */
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			/* Only one service socket may be attached to a local
			 * endpoint at a time.
			 */
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		/* Second bind: must supply a new, distinct service ID... */
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		/* ...on an otherwise identical address (compare with the
		 * service ID normalised away).
		 */
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
/*
 * set the number of pending calls permitted on a listening socket
 *
 * Moves a service-bound socket into the listening state, preallocating
 * backlog resources.  A backlog of INT_MAX means "use the maximum";
 * a backlog of 0 on an already-listening socket disables listening and
 * discards the preallocation.
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		/* Remember the old backlog so we can roll back if
		 * preallocation fails.
		 */
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		/* Fall through */
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @upgrade: Request service upgrade for call
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   bool upgrade,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	lock_sock(&rx->sk);

	/* Fall back to the socket's key; a key with no payload means "no
	 * security".
	 */
	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= false;
	cp.upgrade		= upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		mutex_unlock(&call->user_mutex);
	}

	/* NOTE(review): presumably call setup left a peer ref in cp.peer
	 * that we must drop here (NULL-safe) - confirm against
	 * rxrpc_new_client_call().
	 */
	rxrpc_put_peer(cp.peer);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
/*
 * Dummy function used to stop the notifier talking to recvmsg().
 *
 * Substituted for a kernel service's notify_rx handler when a call is being
 * ended so that no further notifications reach the service.  Deliberately
 * does nothing.
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		/* notify_lock serialises this swap against any notifier
		 * currently running.
		 */
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);
  317. /**
  318. * rxrpc_kernel_check_life - Check to see whether a call is still alive
  319. * @sock: The socket the call is on
  320. * @call: The call to check
  321. *
  322. * Allow a kernel service to find out whether a call is still alive - ie. we're
  323. * getting ACKs from the server. Returns a number representing the life state
  324. * which can be compared to that returned by a previous call.
  325. *
  326. * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  327. * then 2RTT waited.
  328. */
  329. u32 rxrpc_kernel_check_life(const struct socket *sock,
  330. const struct rxrpc_call *call)
  331. {
  332. return call->acks_latest;
  333. }
  334. EXPORT_SYMBOL(rxrpc_kernel_check_life);
/**
 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
 * find out whether a call is still alive by pinging it.  This should cause the
 * life state to be bumped in about 2*RTT.
 *
 * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
 */
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
	/* Propose and immediately transmit a PING ACK; the peer's response
	 * bumps the life state read by rxrpc_kernel_check_life().
	 */
	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
			  rxrpc_propose_ack_ping_for_check_life);
	rxrpc_send_ack_packet(call, true, NULL);
}
EXPORT_SYMBOL(rxrpc_kernel_probe_life);
  353. /**
  354. * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  355. * @sock: The socket the call is on
  356. * @call: The call to query
  357. *
  358. * Allow a kernel service to retrieve the epoch value from a service call to
  359. * see if the client at the other end rebooted.
  360. */
  361. u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
  362. {
  363. return call->conn->proto.epoch;
  364. }
  365. EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
/**
 * rxrpc_kernel_check_call - Check a call's state
 * @sock: The socket the call is on
 * @call: The call to check
 * @_compl: Where to store the completion state
 * @_abort_code: Where to store any abort code
 *
 * Allow a kernel service to query the state of a call and find out the manner
 * of its termination if it has completed.  Returns -EINPROGRESS if the call is
 * still going, 0 if the call finished successfully, -ECONNABORTED if the call
 * was aborted and an appropriate error if the call failed in some other way.
 */
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
			    enum rxrpc_call_completion *_compl, u32 *_abort_code)
{
	if (call->state != RXRPC_CALL_COMPLETE)
		return -EINPROGRESS;
	/* Order the reads of the completion details after the read of the
	 * state so we don't see stale values.
	 */
	smp_rmb();
	*_compl = call->completion;
	*_abort_code = call->abort_code;
	return call->error;
}
EXPORT_SYMBOL(rxrpc_kernel_check_call);
/**
 * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
 * @sock: The socket the call is on
 * @call: The call to retry
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 *
 * Allow a kernel service to try resending a client call that failed due to a
 * network error to a new address.  The Tx queue is maintained intact, thereby
 * relieving the need to re-encrypt any request data that has already been
 * buffered.
 */
int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
			    struct sockaddr_rxrpc *srx, struct key *key)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	/* Fall back to the socket's key; a key with no payload means "no
	 * security".
	 */
	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= 0;
	cp.exclusive		= false;
	cp.service_id		= srx->srx_service;

	mutex_lock(&call->user_mutex);

	ret = rxrpc_prepare_call_for_retry(rx, call);
	if (ret == 0)
		ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);

	mutex_unlock(&call->user_mutex);
	/* NOTE(review): presumably the retry path left a peer ref in cp.peer
	 * that we must drop here (NULL-safe) - confirm against callee.
	 */
	rxrpc_put_peer(cp.peer);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_retry_call);
  428. /**
  429. * rxrpc_kernel_new_call_notification - Get notifications of new calls
  430. * @sock: The socket to intercept received messages on
  431. * @notify_new_call: Function to be called when new calls appear
  432. * @discard_new_call: Function to discard preallocated calls
  433. *
  434. * Allow a kernel service to be given notifications about new calls.
  435. */
  436. void rxrpc_kernel_new_call_notification(
  437. struct socket *sock,
  438. rxrpc_notify_new_call_t notify_new_call,
  439. rxrpc_discard_new_call_t discard_new_call)
  440. {
  441. struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
  442. rx->notify_new_call = notify_new_call;
  443. rx->discard_new_call = discard_new_call;
  444. }
  445. EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	/* Record the default destination for subsequent sendmsg() calls
	 * that don't supply an address.
	 */
	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}
/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		/* Implicitly bind to a wildcard local address of the
		 * socket's family before sending.
		 */
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */

	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		/* With no explicit address, use the one recorded by
		 * connect().
		 */
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		/* Fall through */
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}
  559. /*
  560. * set RxRPC socket options
  561. */
  562. static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
  563. char __user *optval, unsigned int optlen)
  564. {
  565. struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
  566. unsigned int min_sec_level;
  567. u16 service_upgrade[2];
  568. int ret;
  569. _enter(",%d,%d,,%d", level, optname, optlen);
  570. lock_sock(&rx->sk);
  571. ret = -EOPNOTSUPP;
  572. if (level == SOL_RXRPC) {
  573. switch (optname) {
  574. case RXRPC_EXCLUSIVE_CONNECTION:
  575. ret = -EINVAL;
  576. if (optlen != 0)
  577. goto error;
  578. ret = -EISCONN;
  579. if (rx->sk.sk_state != RXRPC_UNBOUND)
  580. goto error;
  581. rx->exclusive = true;
  582. goto success;
  583. case RXRPC_SECURITY_KEY:
  584. ret = -EINVAL;
  585. if (rx->key)
  586. goto error;
  587. ret = -EISCONN;
  588. if (rx->sk.sk_state != RXRPC_UNBOUND)
  589. goto error;
  590. ret = rxrpc_request_key(rx, optval, optlen);
  591. goto error;
  592. case RXRPC_SECURITY_KEYRING:
  593. ret = -EINVAL;
  594. if (rx->key)
  595. goto error;
  596. ret = -EISCONN;
  597. if (rx->sk.sk_state != RXRPC_UNBOUND)
  598. goto error;
  599. ret = rxrpc_server_keyring(rx, optval, optlen);
  600. goto error;
  601. case RXRPC_MIN_SECURITY_LEVEL:
  602. ret = -EINVAL;
  603. if (optlen != sizeof(unsigned int))
  604. goto error;
  605. ret = -EISCONN;
  606. if (rx->sk.sk_state != RXRPC_UNBOUND)
  607. goto error;
  608. ret = get_user(min_sec_level,
  609. (unsigned int __user *) optval);
  610. if (ret < 0)
  611. goto error;
  612. ret = -EINVAL;
  613. if (min_sec_level > RXRPC_SECURITY_MAX)
  614. goto error;
  615. rx->min_sec_level = min_sec_level;
  616. goto success;
  617. case RXRPC_UPGRADEABLE_SERVICE:
  618. ret = -EINVAL;
  619. if (optlen != sizeof(service_upgrade) ||
  620. rx->service_upgrade.from != 0)
  621. goto error;
  622. ret = -EISCONN;
  623. if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
  624. goto error;
  625. ret = -EFAULT;
  626. if (copy_from_user(service_upgrade, optval,
  627. sizeof(service_upgrade)) != 0)
  628. goto error;
  629. ret = -EINVAL;
  630. if ((service_upgrade[0] != rx->srx.srx_service ||
  631. service_upgrade[1] != rx->second_service) &&
  632. (service_upgrade[0] != rx->second_service ||
  633. service_upgrade[1] != rx->srx.srx_service))
  634. goto error;
  635. rx->service_upgrade.from = service_upgrade[0];
  636. rx->service_upgrade.to = service_upgrade[1];
  637. goto success;
  638. default:
  639. break;
  640. }
  641. }
  642. success:
  643. ret = 0;
  644. error:
  645. release_sock(&rx->sk);
  646. return ret;
  647. }
  648. /*
  649. * Get socket options.
  650. */
  651. static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
  652. char __user *optval, int __user *_optlen)
  653. {
  654. int optlen;
  655. if (level != SOL_RXRPC)
  656. return -EOPNOTSUPP;
  657. if (get_user(optlen, _optlen))
  658. return -EFAULT;
  659. switch (optname) {
  660. case RXRPC_SUPPORTED_CMSG:
  661. if (optlen < sizeof(int))
  662. return -ETOOSMALL;
  663. if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
  664. put_user(sizeof(int), _optlen))
  665. return -EFAULT;
  666. return 0;
  667. default:
  668. return -EOPNOTSUPP;
  669. }
  670. }
/*
 * permit an RxRPC socket to be polled
 */
static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!list_empty(&rx->recvmsg_q))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
/*
 * create an RxRPC socket
 *
 * net_proto_family::create handler.  @protocol selects the transport
 * family (PF_INET or, if enabled, PF_INET6).  Initialises the sock and the
 * rxrpc-private state and nudges the per-net peer keepalive timer.
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	if (protocol != PF_INET &&
	    IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state		= RXRPC_UNBOUND;
	sk->sk_write_space	= rxrpc_write_space;
	sk->sk_max_ack_backlog	= 0;
	sk->sk_destruct		= rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	rwlock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	/* Kick the keepalive machinery soon for this netns */
	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}
/*
 * Kill all the calls on a socket and shut it down.
 *
 * Only SHUT_RDWR is supported.  Moves the socket to RXRPC_CLOSE (under the
 * receive-queue lock so it's atomic with respect to the receive path) and
 * discards any preallocated service calls.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (sk->sk_state < RXRPC_CLOSE) {
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
	} else {
		/* Raced with another shutdown/close */
		ret = -ESHUTDOWN;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}
/*
 * RxRPC socket destructor
 *
 * sk_destruct callback: purges any remaining received skbs and sanity-checks
 * that the socket is fully quiesced (no write memory outstanding, unhashed,
 * detached and marked dead) before the sock memory is freed.
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}
/*
 * release an RxRPC socket
 *
 * Orphans the sock, unpublishes any service registration from the local
 * endpoint, tears down all calls and queued data, kicks the connection
 * reapers and drops the references the socket held.
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	/* State change is made under the receive-queue lock so the receive
	 * path sees it atomically.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	/* Unpublish this socket as the endpoint's service, if it is */
	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	rxrpc_queue_work(&rxnet->client_conn_reaper);

	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}
  824. /*
  825. * release an RxRPC BSD socket on close() or equivalent
  826. */
  827. static int rxrpc_release(struct socket *sock)
  828. {
  829. struct sock *sk = sock->sk;
  830. _enter("%p{%p}", sock, sk);
  831. if (!sk)
  832. return 0;
  833. sock->sk = NULL;
  834. return rxrpc_release_sock(sk);
  835. }
  836. /*
  837. * RxRPC network protocol
  838. */
/* Socket-level operations for PF_RXRPC sockets.  Operations that rxrpc
 * does not support (accept, getname, ioctl, mmap, sendpage, socketpair)
 * are wired to the generic sock_no_* stubs, which fail cleanly.
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
/* Protocol definition: sizes each struct sock allocation to hold a full
 * struct rxrpc_sock and reserves headroom for the rxrpc wire header.
 */
static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};
/* Address-family registration: routes socket(PF_RXRPC, ...) to
 * rxrpc_create().
 */
static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};
  870. /*
  871. * initialise and register the RxRPC protocol
  872. */
  873. static int __init af_rxrpc_init(void)
  874. {
  875. int ret = -1;
  876. unsigned int tmp;
  877. BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
  878. get_random_bytes(&tmp, sizeof(tmp));
  879. tmp &= 0x3fffffff;
  880. if (tmp == 0)
  881. tmp = 1;
  882. idr_set_cursor(&rxrpc_client_conn_ids, tmp);
  883. ret = -ENOMEM;
  884. rxrpc_call_jar = kmem_cache_create(
  885. "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
  886. SLAB_HWCACHE_ALIGN, NULL);
  887. if (!rxrpc_call_jar) {
  888. pr_notice("Failed to allocate call jar\n");
  889. goto error_call_jar;
  890. }
  891. rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
  892. if (!rxrpc_workqueue) {
  893. pr_notice("Failed to allocate work queue\n");
  894. goto error_work_queue;
  895. }
  896. ret = rxrpc_init_security();
  897. if (ret < 0) {
  898. pr_crit("Cannot initialise security\n");
  899. goto error_security;
  900. }
  901. ret = register_pernet_subsys(&rxrpc_net_ops);
  902. if (ret)
  903. goto error_pernet;
  904. ret = proto_register(&rxrpc_proto, 1);
  905. if (ret < 0) {
  906. pr_crit("Cannot register protocol\n");
  907. goto error_proto;
  908. }
  909. ret = sock_register(&rxrpc_family_ops);
  910. if (ret < 0) {
  911. pr_crit("Cannot register socket family\n");
  912. goto error_sock;
  913. }
  914. ret = register_key_type(&key_type_rxrpc);
  915. if (ret < 0) {
  916. pr_crit("Cannot register client key type\n");
  917. goto error_key_type;
  918. }
  919. ret = register_key_type(&key_type_rxrpc_s);
  920. if (ret < 0) {
  921. pr_crit("Cannot register server key type\n");
  922. goto error_key_type_s;
  923. }
  924. ret = rxrpc_sysctl_init();
  925. if (ret < 0) {
  926. pr_crit("Cannot register sysctls\n");
  927. goto error_sysctls;
  928. }
  929. return 0;
  930. error_sysctls:
  931. unregister_key_type(&key_type_rxrpc_s);
  932. error_key_type_s:
  933. unregister_key_type(&key_type_rxrpc);
  934. error_key_type:
  935. sock_unregister(PF_RXRPC);
  936. error_sock:
  937. proto_unregister(&rxrpc_proto);
  938. error_proto:
  939. unregister_pernet_subsys(&rxrpc_net_ops);
  940. error_pernet:
  941. rxrpc_exit_security();
  942. error_security:
  943. destroy_workqueue(rxrpc_workqueue);
  944. error_work_queue:
  945. kmem_cache_destroy(rxrpc_call_jar);
  946. error_call_jar:
  947. return ret;
  948. }
  949. /*
  950. * unregister the RxRPC protocol
  951. */
/*
 * unregister the RxRPC protocol
 *
 * Tears down everything af_rxrpc_init() registered, in reverse order,
 * then waits for deferred destruction to finish before freeing the
 * workqueue, security modules and call slab cache.
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_subsys(&rxrpc_net_ops);
	/* All tx/rx skbs must have been released by now. */
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}
/* Hook module load/unload to the init/exit routines above. */
module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);