output.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721
  1. /* RxRPC packet transmission
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/net.h>
  13. #include <linux/gfp.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/circ_buf.h>
  16. #include <linux/export.h>
  17. #include <net/sock.h>
  18. #include <net/af_rxrpc.h>
  19. #include "ar-internal.h"
/*
 * Time till packet resend (in jiffies).
 */
unsigned int rxrpc_resend_timeout = 4 * HZ;

/* Forward declaration: the data transmitter is defined at the bottom of this
 * file but is needed by rxrpc_do_sendmsg() and rxrpc_kernel_send_data().
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len);
  27. /*
  28. * extract control messages from the sendmsg() control buffer
  29. */
  30. static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
  31. unsigned long *user_call_ID,
  32. enum rxrpc_command *command,
  33. u32 *abort_code,
  34. bool *_exclusive)
  35. {
  36. struct cmsghdr *cmsg;
  37. bool got_user_ID = false;
  38. int len;
  39. *command = RXRPC_CMD_SEND_DATA;
  40. if (msg->msg_controllen == 0)
  41. return -EINVAL;
  42. for_each_cmsghdr(cmsg, msg) {
  43. if (!CMSG_OK(msg, cmsg))
  44. return -EINVAL;
  45. len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
  46. _debug("CMSG %d, %d, %d",
  47. cmsg->cmsg_level, cmsg->cmsg_type, len);
  48. if (cmsg->cmsg_level != SOL_RXRPC)
  49. continue;
  50. switch (cmsg->cmsg_type) {
  51. case RXRPC_USER_CALL_ID:
  52. if (msg->msg_flags & MSG_CMSG_COMPAT) {
  53. if (len != sizeof(u32))
  54. return -EINVAL;
  55. *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
  56. } else {
  57. if (len != sizeof(unsigned long))
  58. return -EINVAL;
  59. *user_call_ID = *(unsigned long *)
  60. CMSG_DATA(cmsg);
  61. }
  62. _debug("User Call ID %lx", *user_call_ID);
  63. got_user_ID = true;
  64. break;
  65. case RXRPC_ABORT:
  66. if (*command != RXRPC_CMD_SEND_DATA)
  67. return -EINVAL;
  68. *command = RXRPC_CMD_SEND_ABORT;
  69. if (len != sizeof(*abort_code))
  70. return -EINVAL;
  71. *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
  72. _debug("Abort %x", *abort_code);
  73. if (*abort_code == 0)
  74. return -EINVAL;
  75. break;
  76. case RXRPC_ACCEPT:
  77. if (*command != RXRPC_CMD_SEND_DATA)
  78. return -EINVAL;
  79. *command = RXRPC_CMD_ACCEPT;
  80. if (len != 0)
  81. return -EINVAL;
  82. break;
  83. case RXRPC_EXCLUSIVE_CALL:
  84. *_exclusive = true;
  85. if (len != 0)
  86. return -EINVAL;
  87. break;
  88. default:
  89. return -EINVAL;
  90. }
  91. }
  92. if (!got_user_ID)
  93. return -EINVAL;
  94. _leave(" = 0");
  95. return 0;
  96. }
/*
 * abort a call, sending an ABORT packet to the peer
 *
 * Marks the call locally aborted, records the abort code and queues the call
 * for the event processor to actually transmit the ABORT packet.  Done under
 * the call's state lock so the state transition is atomic with respect to
 * other state changers.
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = abort_code;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		/* Stop the resend and ACK timers and clear their pending
		 * events so that only the ABORT event is acted upon. */
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_EV_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		/* Kick the work item that will send the ABORT packet. */
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}
  116. /*
  117. * Create a new client call for sendmsg().
  118. */
  119. static struct rxrpc_call *
  120. rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
  121. unsigned long user_call_ID, bool exclusive)
  122. {
  123. struct rxrpc_conn_parameters cp;
  124. struct rxrpc_call *call;
  125. struct key *key;
  126. DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
  127. _enter("");
  128. if (!msg->msg_name)
  129. return ERR_PTR(-EDESTADDRREQ);
  130. key = rx->key;
  131. if (key && !rx->key->payload.data[0])
  132. key = NULL;
  133. memset(&cp, 0, sizeof(cp));
  134. cp.local = rx->local;
  135. cp.key = rx->key;
  136. cp.security_level = rx->min_sec_level;
  137. cp.exclusive = rx->exclusive | exclusive;
  138. cp.service_id = srx->srx_service;
  139. call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
  140. _leave(" = %p\n", call);
  141. return call;
  142. }
  143. /*
  144. * send a message forming part of a client call through an RxRPC socket
  145. * - caller holds the socket locked
  146. * - the socket may be either a client socket or a server socket
  147. */
  148. int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
  149. {
  150. enum rxrpc_command cmd;
  151. struct rxrpc_call *call;
  152. unsigned long user_call_ID = 0;
  153. bool exclusive = false;
  154. u32 abort_code = 0;
  155. int ret;
  156. _enter("");
  157. ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
  158. &exclusive);
  159. if (ret < 0)
  160. return ret;
  161. if (cmd == RXRPC_CMD_ACCEPT) {
  162. if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
  163. return -EINVAL;
  164. call = rxrpc_accept_call(rx, user_call_ID);
  165. if (IS_ERR(call))
  166. return PTR_ERR(call);
  167. rxrpc_put_call(call);
  168. return 0;
  169. }
  170. call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
  171. if (!call) {
  172. if (cmd != RXRPC_CMD_SEND_DATA)
  173. return -EBADSLT;
  174. call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
  175. exclusive);
  176. if (IS_ERR(call))
  177. return PTR_ERR(call);
  178. }
  179. _debug("CALL %d USR %lx ST %d on CONN %p",
  180. call->debug_id, call->user_call_ID, call->state, call->conn);
  181. if (call->state >= RXRPC_CALL_COMPLETE) {
  182. /* it's too late for this call */
  183. ret = -ECONNRESET;
  184. } else if (cmd == RXRPC_CMD_SEND_ABORT) {
  185. rxrpc_send_abort(call, abort_code);
  186. ret = 0;
  187. } else if (cmd != RXRPC_CMD_SEND_DATA) {
  188. ret = -EINVAL;
  189. } else if (!call->in_clientflag &&
  190. call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
  191. /* request phase complete for this client call */
  192. ret = -EPROTO;
  193. } else if (call->in_clientflag &&
  194. call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
  195. call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
  196. /* Reply phase not begun or not complete for service call. */
  197. ret = -EPROTO;
  198. } else {
  199. ret = rxrpc_send_data(rx, call, msg, len);
  200. }
  201. rxrpc_put_call(call);
  202. _leave(" = %d", ret);
  203. return ret;
  204. }
  205. /**
  206. * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
  207. * @call: The call to send data through
  208. * @msg: The data to send
  209. * @len: The amount of data to send
  210. *
  211. * Allow a kernel service to send data on a call. The call must be in an state
  212. * appropriate to sending data. No control data should be supplied in @msg,
  213. * nor should an address be supplied. MSG_MORE should be flagged if there's
  214. * more data to come, otherwise this data will end the transmission phase.
  215. */
  216. int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
  217. size_t len)
  218. {
  219. int ret;
  220. _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
  221. ASSERTCMP(msg->msg_name, ==, NULL);
  222. ASSERTCMP(msg->msg_control, ==, NULL);
  223. lock_sock(&call->socket->sk);
  224. _debug("CALL %d USR %lx ST %d on CONN %p",
  225. call->debug_id, call->user_call_ID, call->state, call->conn);
  226. if (call->state >= RXRPC_CALL_COMPLETE) {
  227. ret = -ESHUTDOWN; /* it's too late for this call */
  228. } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
  229. call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
  230. call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
  231. ret = -EPROTO; /* request phase complete for this client call */
  232. } else {
  233. ret = rxrpc_send_data(call->socket, call, msg, len);
  234. }
  235. release_sock(&call->socket->sk);
  236. _leave(" = %d", ret);
  237. return ret;
  238. }
  239. EXPORT_SYMBOL(rxrpc_kernel_send_data);
  240. /**
  241. * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
  242. * @call: The call to be aborted
  243. * @abort_code: The abort code to stick into the ABORT packet
  244. *
  245. * Allow a kernel service to abort a call, if it's still in an abortable state.
  246. */
  247. void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
  248. {
  249. _enter("{%d},%d", call->debug_id, abort_code);
  250. lock_sock(&call->socket->sk);
  251. _debug("CALL %d USR %lx ST %d on CONN %p",
  252. call->debug_id, call->user_call_ID, call->state, call->conn);
  253. if (call->state < RXRPC_CALL_COMPLETE)
  254. rxrpc_send_abort(call, abort_code);
  255. release_sock(&call->socket->sk);
  256. _leave("");
  257. }
  258. EXPORT_SYMBOL(rxrpc_kernel_abort_call);
  259. /*
  260. * send a packet through the transport endpoint
  261. */
  262. int rxrpc_send_data_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
  263. {
  264. struct kvec iov[1];
  265. struct msghdr msg;
  266. int ret, opt;
  267. _enter(",{%d}", skb->len);
  268. iov[0].iov_base = skb->head;
  269. iov[0].iov_len = skb->len;
  270. msg.msg_name = &conn->params.peer->srx.transport;
  271. msg.msg_namelen = conn->params.peer->srx.transport_len;
  272. msg.msg_control = NULL;
  273. msg.msg_controllen = 0;
  274. msg.msg_flags = 0;
  275. /* send the packet with the don't fragment bit set if we currently
  276. * think it's small enough */
  277. if (skb->len - sizeof(struct rxrpc_wire_header) < conn->params.peer->maxdata) {
  278. down_read(&conn->params.local->defrag_sem);
  279. /* send the packet by UDP
  280. * - returns -EMSGSIZE if UDP would have to fragment the packet
  281. * to go out of the interface
  282. * - in which case, we'll have processed the ICMP error
  283. * message and update the peer record
  284. */
  285. ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
  286. iov[0].iov_len);
  287. up_read(&conn->params.local->defrag_sem);
  288. if (ret == -EMSGSIZE)
  289. goto send_fragmentable;
  290. _leave(" = %d [%u]", ret, conn->params.peer->maxdata);
  291. return ret;
  292. }
  293. send_fragmentable:
  294. /* attempt to send this message with fragmentation enabled */
  295. _debug("send fragment");
  296. down_write(&conn->params.local->defrag_sem);
  297. switch (conn->params.local->srx.transport.family) {
  298. case AF_INET:
  299. opt = IP_PMTUDISC_DONT;
  300. ret = kernel_setsockopt(conn->params.local->socket,
  301. SOL_IP, IP_MTU_DISCOVER,
  302. (char *)&opt, sizeof(opt));
  303. if (ret == 0) {
  304. ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
  305. iov[0].iov_len);
  306. opt = IP_PMTUDISC_DO;
  307. kernel_setsockopt(conn->params.local->socket, SOL_IP,
  308. IP_MTU_DISCOVER,
  309. (char *)&opt, sizeof(opt));
  310. }
  311. break;
  312. }
  313. up_write(&conn->params.local->defrag_sem);
  314. _leave(" = %d [frag %u]", ret, conn->params.peer->maxdata);
  315. return ret;
  316. }
  317. /*
  318. * wait for space to appear in the transmit/ACK window
  319. * - caller holds the socket locked
  320. */
  321. static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
  322. struct rxrpc_call *call,
  323. long *timeo)
  324. {
  325. DECLARE_WAITQUEUE(myself, current);
  326. int ret;
  327. _enter(",{%d},%ld",
  328. CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
  329. call->acks_winsz),
  330. *timeo);
  331. add_wait_queue(&call->tx_waitq, &myself);
  332. for (;;) {
  333. set_current_state(TASK_INTERRUPTIBLE);
  334. ret = 0;
  335. if (CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
  336. call->acks_winsz) > 0)
  337. break;
  338. if (signal_pending(current)) {
  339. ret = sock_intr_errno(*timeo);
  340. break;
  341. }
  342. release_sock(&rx->sk);
  343. *timeo = schedule_timeout(*timeo);
  344. lock_sock(&rx->sk);
  345. }
  346. remove_wait_queue(&call->tx_waitq, &myself);
  347. set_current_state(TASK_RUNNING);
  348. _leave(" = %d", ret);
  349. return ret;
  350. }
/*
 * attempt to schedule an instant Tx resend
 *
 * If the resend timer could be stopped (try_to_del_timer_sync() returns >= 0
 * when the timer's handler isn't currently running), raise the resend event
 * directly and kick the call's work item instead of waiting for the timer to
 * fire.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
{
	read_lock_bh(&call->state_lock);
	if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		/* Don't bother resending a call that's already finished. */
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
			rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * queue a packet for transmission, set the resend timer and attempt
 * to send the packet immediately
 *
 * The skb is parked in the call's ACK window so it can be retransmitted until
 * it is hard-ACK'd.  If this is the last packet of a phase, the call state is
 * advanced accordingly.
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int ret;

	_net("queue skb %p [%d]", skb, call->acks_head);

	ASSERT(call->acks_window != NULL);
	call->acks_window[call->acks_head] = (unsigned long) skb;
	/* Publish the skb pointer before advancing the head index -
	 * presumably paired with a read barrier on the consumer side;
	 * TODO(review): confirm against the ACK-processing code. */
	smp_wmb();
	call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			if (!last)
				break;
			/* fall through - the last reply packet moves the call
			 * straight on to awaiting the final ACK */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	_proto("Tx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

	sp->need_resend = false;
	sp->resend_at = jiffies + rxrpc_resend_timeout;
	/* Only (re)arm the resend timer if it isn't already marked running. */
	if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
		_debug("run timer");
		call->resend_timer.expires = sp->resend_at;
		add_timer(&call->resend_timer);
	}

	/* attempt to cancel the rx-ACK timer, deferring reply transmission if
	 * we're ACK'ing the request phase of an incoming call */
	ret = -EAGAIN;
	if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
		/* the packet may be freed by rxrpc_process_call() before this
		 * returns */
		ret = rxrpc_send_data_packet(call->conn, skb);
		_net("sent skb %p", skb);
	} else {
		_debug("failed to delete ACK timer");
	}

	/* If the immediate send failed (or was skipped), fall back to the
	 * resend machinery to get the packet out. */
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		sp->need_resend = true;
		rxrpc_instant_resend(call);
	}

	_leave("");
}
  424. /*
  425. * Convert a host-endian header into a network-endian header.
  426. */
  427. static void rxrpc_insert_header(struct sk_buff *skb)
  428. {
  429. struct rxrpc_wire_header whdr;
  430. struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
  431. whdr.epoch = htonl(sp->hdr.epoch);
  432. whdr.cid = htonl(sp->hdr.cid);
  433. whdr.callNumber = htonl(sp->hdr.callNumber);
  434. whdr.seq = htonl(sp->hdr.seq);
  435. whdr.serial = htonl(sp->hdr.serial);
  436. whdr.type = sp->hdr.type;
  437. whdr.flags = sp->hdr.flags;
  438. whdr.userStatus = sp->hdr.userStatus;
  439. whdr.securityIndex = sp->hdr.securityIndex;
  440. whdr._rsvd = htons(sp->hdr._rsvd);
  441. whdr.serviceId = htons(sp->hdr.serviceId);
  442. memcpy(skb->head, &whdr, sizeof(whdr));
  443. }
/*
 * send data through a socket
 * - must be called in process context
 * - caller holds the socket locked
 *
 * Copies the caller's data from @msg into one or more DATA skbs, filling and
 * transmitting each as it becomes full.  A partially-filled skb is stashed in
 * call->tx_pending so a later sendmsg() with MSG_MORE can continue it.
 * Returns the number of bytes consumed, or a negative error.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	/* Resume any packet left half-filled by a previous sendmsg(). */
	skb = call->tx_pending;
	call->tx_pending = NULL;

	copied = 0;
	do {
		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			/* Wait for room in the ACK window before allocating
			 * another packet. */
			if (CIRC_SPACE(call->acks_head,
				       ACCESS_ONCE(call->acks_tail),
				       call->acks_winsz) <= 0) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			/* Work out the largest payload that fits the peer's
			 * MTU after the security wrapper, rounded down to the
			 * security alignment. */
			max = call->conn->params.peer->maxdata;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			/* Round the buffer up so padding can be added. */
			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->header_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb);

			_debug("ALLOC SEND %p", skb);

			/* skb->mark is reused as the count of data bytes in
			 * the packet - it must start at zero. */
			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->header_size);
			skb_reserve(skb, call->conn->header_size);
			/* NOTE(review): len is bumped to account for the
			 * reserved header space - confirm this matches what
			 * rxrpc_send_data_packet() expects of skb->len. */
			skb->len += call->conn->header_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			       skb_headroom(skb),
			       skb_tailroom(skb),
			       skb_headlen(skb),
			       sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state > RXRPC_CALL_COMPLETE)
			goto call_aborted;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					memset(skb_put(skb, pad), 0, pad);
			}

			seq = atomic_inc_return(&call->sequence);

			/* Fill in the protocol header in host order; it is
			 * byte-swapped into the skb by rxrpc_insert_header(). */
			sp->hdr.epoch	= conn->proto.epoch;
			sp->hdr.cid	= call->cid;
			sp->hdr.callNumber = call->call_id;
			sp->hdr.seq	= seq;
			sp->hdr.serial	= atomic_inc_return(&conn->serial);
			sp->hdr.type	= RXRPC_PACKET_TYPE_DATA;
			sp->hdr.userStatus = 0;
			sp->hdr.securityIndex = conn->security_ix;
			sp->hdr._rsvd	= 0;
			sp->hdr.serviceId = call->service_id;

			sp->hdr.flags = conn->out_clientflag;
			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (CIRC_SPACE(call->acks_head,
					    ACCESS_ONCE(call->acks_tail),
					    call->acks_winsz) > 1)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;
			/* Ask for an ACK on every other packet while more
			 * data is outstanding. */
			if (more && seq & 1)
				sp->hdr.flags |= RXRPC_REQUEST_ACK;

			ret = conn->security->secure_packet(
				call, skb, skb->mark,
				skb->head + sizeof(struct rxrpc_wire_header));
			if (ret < 0)
				goto out;

			rxrpc_insert_header(skb);
			rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	/* Park any half-filled packet for the next sendmsg(). */
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_aborted:
	rxrpc_free_skb(skb);
	/* Translate a stored network error report; local error codes are
	 * offset by RXRPC_LOCAL_ERROR_OFFSET. */
	if (call->state == RXRPC_CALL_NETWORK_ERROR)
		ret = call->error_report < RXRPC_LOCAL_ERROR_OFFSET ?
			call->error_report :
			call->error_report - RXRPC_LOCAL_ERROR_OFFSET;
	else
		ret = -ECONNABORTED;
	_leave(" = %d", ret);
	return ret;

maybe_error:
	/* Report partial progress rather than an error if any data went. */
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}