/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%d", call->debug_id);
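
	/* A non-empty recvmsg_link means the call is already queued for
	 * attention (or is parked on the dummy queue), so it needn't be
	 * posted again.
	 */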
	if (!list_empty(&call->recvmsg_link))
		return;

	rcu_read_lock();

	rx = rcu_dereference(call->socket);
	sk = &rx->sk;
	if (rx && sk->sk_state < RXRPC_CLOSE) {
		if (call->notify_rx) {
			spin_lock_bh(&call->notify_lock);
			call->notify_rx(sk, call, call->user_call_ID);
			spin_unlock_bh(&call->notify_lock);
		} else {
			write_lock_bh(&rx->recvmsg_lock);
			if (list_empty(&call->recvmsg_link)) {
				rxrpc_get_call(call, rxrpc_call_got);
				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
			}
			write_unlock_bh(&rx->recvmsg_lock);

			if (!sock_flag(sk, SOCK_DEAD)) {
				_debug("call %ps", sk->sk_data_ready);
				sk->sk_data_ready(sk);
			}
		}
	}

	rcu_read_unlock();
	_leave("");
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
	u32 tmp = 0;
	int ret;

	switch (call->completion) {
	case RXRPC_CALL_SUCCEEDED:
		ret = 0;
		if (rxrpc_is_service_call(call))
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_LOCALLY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_NETWORK_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
		break;
	case RXRPC_CALL_LOCAL_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
		break;
	default:
		pr_err("Invalid terminal call state %u\n", call->state);
		BUG();
		break;
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
			    call->rx_pkt_offset, call->rx_pkt_len, ret);
	return ret;
}

/*
 * Pass back notification of a new call.  The call is added to the
 * to-be-accepted list.  This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
				  struct rxrpc_call *call,
				  struct msghdr *msg, int flags)
{
	int tmp = 0, ret;

	ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);

	if (ret == 0 && !(flags & MSG_PEEK)) {
		_debug("to be accepted");
		write_lock_bh(&rx->recvmsg_lock);
		list_del_init(&call->recvmsg_link);
		write_unlock_bh(&rx->recvmsg_lock);

		rxrpc_get_call(call, rxrpc_call_got);
		write_lock(&rx->call_lock);
		list_add_tail(&call->accept_link, &rx->to_be_accepted);
		write_unlock(&rx->call_lock);
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
	return ret;
}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
				  rxrpc_propose_ack_terminal_ack);
		rxrpc_send_ack_packet(call, false, NULL);
	}
#endif

	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		__rxrpc_call_completed(call);
		write_unlock_bh(&call->state_lock);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->tx_phase = true;
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
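		/* Setting the expiry to jiffies + MAX_JIFFY_OFFSET pushes the
		 * expect-request timeout as far into the future as possible.
		 */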
		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
		write_unlock_bh(&call->state_lock);
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
				  rxrpc_propose_ack_processing_op);
		break;
	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	u8 flags;
	int ix;

	_enter("%d", call->debug_id);

	hard_ack = call->rx_hard_ack;
	top = smp_load_acquire(&call->rx_top);
	ASSERT(before(hard_ack, top));

	hard_ack++;
	ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
	skb = call->rxtx_buffer[ix];
	rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
	sp = rxrpc_skb(skb);
	flags = sp->hdr.flags;
	serial = sp->hdr.serial;
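
	/* The jumbo annotation carries the 1-based subpacket index, and the
	 * subpackets of a jumbo packet are treated as having consecutive
	 * serial numbers, so step the serial on to that of this subpacket.
	 */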
	if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
		serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

	call->rxtx_buffer[ix] = NULL;
	call->rxtx_annotations[ix] = 0;
	/* Barrier against rxrpc_input_data(). */
	smp_store_release(&call->rx_hard_ack, hard_ack);

	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	_debug("%u,%u,%02x", hard_ack, top, flags);
	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
	if (flags & RXRPC_LAST_PACKET) {
		rxrpc_end_rx_phase(call, serial);
	} else {
		/* Check to see if there's an ACK that needs sending. */
		if (after_eq(hard_ack, call->ackr_consumed + 2) ||
		    after_eq(top, call->ackr_seen + 2) ||
		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
					  true, true,
					  rxrpc_propose_ack_rotate_rx);
		if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
			rxrpc_send_ack_packet(call, false, NULL);
	}
}

/*
 * Decrypt and verify a (sub)packet.  The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer.  Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       u8 annotation,
			       unsigned int offset, unsigned int len)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	u16 cksum = sp->hdr.cksum;

	_enter("");

	/* For all but the head jumbo subpacket, the security checksum is in a
	 * jumbo header immediately prior to the data.
	 */
	if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
		__be16 tmp;
		if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
			BUG();
		cksum = ntohs(tmp);
		seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
	}

	return call->conn->security->verify_packet(call, skb, offset, len,
						   seq, cksum);
}

/*
 * Locate the data within a packet.  This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u8 *_annotation,
			     unsigned int *_offset, unsigned int *_len)
{
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len;
	int ret;
	u8 annotation = *_annotation;

	/* Locate the subpacket */
	len = skb->len - offset;
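
	/* Subpacket n (numbered from 1) starts n - 1 full subpacket lengths
	 * into the payload; every subpacket bar the last is exactly
	 * RXRPC_JUMBO_SUBPKTLEN long.
	 */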
	if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
		offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
			   RXRPC_JUMBO_SUBPKTLEN);
		len = (annotation & RXRPC_RX_ANNO_JLAST) ?
			skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
	}

	if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
		if (ret < 0)
			return ret;
		*_annotation |= RXRPC_RX_ANNO_VERIFIED;
	}

	*_offset = offset;
	*_len = len;
	call->conn->security->locate_data(call, skb, _offset, _len);
	return 0;
}

/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
			      struct msghdr *msg, struct iov_iter *iter,
			      size_t len, int flags, size_t *_offset)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t hard_ack, top, seq;
	size_t remain;
	bool last;
	unsigned int rx_pkt_offset, rx_pkt_len;
	int ix, copy, ret = -EAGAIN, ret2;

	rx_pkt_offset = call->rx_pkt_offset;
	rx_pkt_len = call->rx_pkt_len;
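
	/* Once the call has progressed past the receive phase, there's no
	 * more data to come.
	 */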
	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
		seq = call->rx_hard_ack;
		ret = 1;
		goto done;
	}

	/* Barriers against rxrpc_input_data(). */
	hard_ack = call->rx_hard_ack;
	seq = hard_ack + 1;
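
	/* Re-read rx_top on each pass; the load-acquire pairs with the
	 * store-release in rxrpc_input_data() so that a newly inserted packet
	 * is visible before we look at its buffer slot.
	 */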
	while (top = smp_load_acquire(&call->rx_top),
	       before_eq(seq, top)
	       ) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		if (!skb) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			break;
		}
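
		/* Make sure we see the skb's contents only after picking up
		 * the buffer pointer.
		 */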
		smp_rmb();
		rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
		sp = rxrpc_skb(skb);

		if (!(flags & MSG_PEEK))
			trace_rxrpc_receive(call, rxrpc_receive_front,
					    sp->hdr.serial, seq);

		if (msg)
			sock_recv_timestamp(msg, sock->sk, skb);

		if (rx_pkt_offset == 0) {
			ret2 = rxrpc_locate_data(call, skb,
						 &call->rxtx_annotations[ix],
						 &rx_pkt_offset, &rx_pkt_len);
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
					    rx_pkt_offset, rx_pkt_len, ret2);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}
		} else {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
		}

		/* We have to handle short, empty and used-up DATA packets. */
		remain = len - *_offset;
		copy = rx_pkt_len;
		if (copy > remain)
			copy = remain;
		if (copy > 0) {
			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
						      copy);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}

			/* handle piecemeal consumption of data packets */
			rx_pkt_offset += copy;
			rx_pkt_len -= copy;
			*_offset += copy;
		}
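
		/* If the packet still has data left over, the caller's buffer
		 * must be full; record where we got to for the next call.
		 */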
		if (rx_pkt_len > 0) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			ASSERTCMP(*_offset, ==, len);
			ret = 0;
			break;
		}

		/* The whole packet has been transferred. */
		last = sp->hdr.flags & RXRPC_LAST_PACKET;
		if (!(flags & MSG_PEEK))
			rxrpc_rotate_rx_window(call);
		rx_pkt_offset = 0;
		rx_pkt_len = 0;

		if (last) {
			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
			ret = 1;
			goto out;
		}

		seq++;
	}

out:
	if (!(flags & MSG_PEEK)) {
		call->rx_pkt_offset = rx_pkt_offset;
		call->rx_pkt_len = rx_pkt_len;
	}
done:
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
			    rx_pkt_offset, rx_pkt_len, ret);
	return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -ENODATA;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	/* We're going to drop the socket lock, so we need to lock the call
	 * against interference by sendmsg.
	 */
	if (!mutex_trylock(&call->user_mutex)) {
		ret = -EWOULDBLOCK;
		if (flags & MSG_DONTWAIT)
			goto error_requeue_call;
		ret = -ERESTARTSYS;
		if (mutex_lock_interruptible(&call->user_mutex) < 0)
			goto error_requeue_call;
	}

	release_sock(&rx->sk);

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
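
	/* A task in compat mode expects the user call ID control message to
	 * carry a 32-bit value rather than a native unsigned long.
	 */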
	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			unsigned long idl = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long), &idl);
		}

		if (ret < 0)
			goto error_unlock_call;
	}

	if (msg->msg_name) {
		struct sockaddr_rxrpc *srx = msg->msg_name;
		size_t len = sizeof(call->peer->srx);

		memcpy(msg->msg_name, &call->peer->srx, len);
		srx->srx_service = call->service_id;
		msg->msg_namelen = len;
	}

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
		break;
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error_unlock_call;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error_unlock_call;
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}
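
	/* On success, the return value is the number of bytes copied;
	 * MSG_MORE indicates that more data is expected on this call.
	 */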
	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error_unlock_call:
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

error_requeue_call:
	if (!(flags & MSG_PEEK)) {
		write_lock_bh(&rx->recvmsg_lock);
		list_add(&call->recvmsg_link, &rx->recvmsg_q);
		write_unlock_bh(&rx->recvmsg_lock);
		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
error_no_call:
	release_sock(&rx->sk);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_no_call;
}

/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to send data through
 * @buf: The buffer to receive into
 * @size: The size of the buffer, including data already read
 * @_offset: The running offset into the buffer.
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * This function adds the amount it transfers to *_offset, so this should be
 * precleared as appropriate.  Note that the amount remaining in the buffer is
 * taken to be size - *_offset.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
			   void *buf, size_t size, size_t *_offset,
			   bool want_more, u32 *_abort, u16 *_service)
{
	struct iov_iter iter;
	struct kvec iov;
	int ret;

	_enter("{%d,%s},%zu/%zu,%d",
	       call->debug_id, rxrpc_call_states[call->state],
	       *_offset, size, want_more);

	ASSERTCMP(*_offset, <=, size);
	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

	iov.iov_base = buf + *_offset;
	iov.iov_len = size - *_offset;
	iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);

	mutex_lock(&call->user_mutex);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
					 _offset);
		if (ret < 0)
			goto out;

		/* We can only reach here with a partially full buffer if we
		 * have reached the end of the data.  We must otherwise have a
		 * full buffer or have been given -EAGAIN.
		 */
		if (ret == 1) {
			if (*_offset < size)
				goto short_data;
			if (!want_more)
				goto read_phase_complete;
			ret = 0;
			goto out;
		}

		if (!want_more)
			goto excess_data;
		goto out;

	case RXRPC_CALL_COMPLETE:
		goto call_complete;

	default:
		ret = -EINPROGRESS;
		goto out;
	}

read_phase_complete:
	ret = 1;
out:
	if (_service)
		*_service = call->service_id;
	mutex_unlock(&call->user_mutex);
	_leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
	return ret;

short_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
	ret = -EBADMSG;
	goto out;
excess_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
	ret = -EMSGSIZE;
	goto out;
call_complete:
	*_abort = call->abort_code;
	ret = call->error;
	if (call->completion == RXRPC_CALL_SUCCEEDED) {
		ret = 1;
		if (size > 0)
			ret = -ECONNRESET;
	}
	goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);