/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * How long to wait before scheduling ACK generation after seeing a
 * packet with RXRPC_REQUEST_ACK set (in jiffies).
 */
unsigned rxrpc_requested_ack_delay = 1;

/*
 * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
 *
 * We use this when we've received new data packets.  If those packets aren't
 * all consumed within this time we will send a DELAY ACK if an ACK was not
 * requested to let the sender know it doesn't need to resend.
 */
unsigned rxrpc_soft_ack_delay = 1 * HZ;

/*
 * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
 *
 * We use this when we've consumed some previously soft-ACK'd packets when
 * further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
 */
unsigned rxrpc_idle_ack_delay = 0.5 * HZ;

/*
 * Receive window size in packets.  This indicates the maximum number of
 * unconsumed received packets we're willing to retain in memory.  Once this
 * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
 * packets.
 */
unsigned rxrpc_rx_window_size = 32;

/*
 * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packet
 * made by gluing normal packets together that we're willing to handle.
 */
unsigned rxrpc_rx_mtu = 5692;

/*
 * The maximum number of fragments in a received jumbo packet that we tell the
 * sender that we're willing to handle.
 */
unsigned rxrpc_rx_jumbo_max = 4;
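
/*
 * The three Rx parameters above (window size, Rx MTU and jumbo limit) are
 * what we advertise to the peer in the rxrpc_ackinfo trailer appended to
 * outgoing ACKs; see the send_ACK path in rxrpc_process_call() below.
 */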

static const char *rxrpc_acks(u8 reason)
{
        static const char *const str[] = {
                "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
                "IDL", "-?-"
        };

        if (reason >= ARRAY_SIZE(str))
                reason = ARRAY_SIZE(str) - 1;
        return str[reason];
}
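
/*
 * Relative priority of each ACK reason, used by the table below.
 * __rxrpc_propose_ACK() only lets a proposed ACK displace a pending one of
 * strictly lower priority; an equal-priority proposal merely refreshes the
 * recorded serial number.
 */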

static const s8 rxrpc_ack_priority[] = {
        [0]                             = 0,
        [RXRPC_ACK_DELAY]               = 1,
        [RXRPC_ACK_REQUESTED]           = 2,
        [RXRPC_ACK_IDLE]                = 3,
        [RXRPC_ACK_PING_RESPONSE]       = 4,
        [RXRPC_ACK_DUPLICATE]           = 5,
        [RXRPC_ACK_OUT_OF_SEQUENCE]     = 6,
        [RXRPC_ACK_EXCEEDS_WINDOW]      = 7,
        [RXRPC_ACK_NOSPACE]             = 8,
};

/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                         __be32 serial, bool immediate)
{
        unsigned long expiry;
        s8 prior = rxrpc_ack_priority[ack_reason];

        ASSERTCMP(prior, >, 0);

        _enter("{%d},%s,%%%x,%u",
               call->debug_id, rxrpc_acks(ack_reason), ntohl(serial),
               immediate);

        if (prior < rxrpc_ack_priority[call->ackr_reason]) {
                if (immediate)
                        goto cancel_timer;
                return;
        }

        /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
         * numbers */
        if (prior == rxrpc_ack_priority[call->ackr_reason]) {
                if (prior <= 4)
                        call->ackr_serial = serial;
                if (immediate)
                        goto cancel_timer;
                return;
        }

        call->ackr_reason = ack_reason;
        call->ackr_serial = serial;

        switch (ack_reason) {
        case RXRPC_ACK_DELAY:
                _debug("run delay timer");
                expiry = rxrpc_soft_ack_delay;
                goto run_timer;

        case RXRPC_ACK_IDLE:
                if (!immediate) {
                        _debug("run defer timer");
                        expiry = rxrpc_idle_ack_delay;
                        goto run_timer;
                }
                goto cancel_timer;

        case RXRPC_ACK_REQUESTED:
                expiry = rxrpc_requested_ack_delay;
                if (!expiry)
                        goto cancel_timer;
                if (!immediate || serial == cpu_to_be32(1)) {
                        _debug("run defer timer");
                        goto run_timer;
                }
                /* fall through: an immediate ACK was requested */

        default:
                _debug("immediate ACK");
                goto cancel_timer;
        }

run_timer:
        expiry += jiffies;
        if (!timer_pending(&call->ack_timer) ||
            time_after(call->ack_timer.expires, expiry))
                mod_timer(&call->ack_timer, expiry);
        return;

cancel_timer:
        _debug("cancel timer %%%u", ntohl(serial));
        try_to_del_timer_sync(&call->ack_timer);
        read_lock_bh(&call->state_lock);
        if (call->state <= RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}
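
/*
 * Note the locking contract: __rxrpc_propose_ACK() above is called with
 * call->lock already held; the wrapper below takes the lock itself, after an
 * unlocked priority pre-check to avoid taking it for proposals that cannot
 * displace the pending ACK anyway.
 */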

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                       __be32 serial, bool immediate)
{
        s8 prior = rxrpc_ack_priority[ack_reason];

        if (prior > rxrpc_ack_priority[call->ackr_reason]) {
                spin_lock_bh(&call->lock);
                __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
                spin_unlock_bh(&call->lock);
        }
}
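
/*
 * The resend state computed by the Tx window scans below is a two-bit mask:
 * bit 0 asks for the RXRPC_CALL_RESEND event to be raised (resend now) and
 * bit 1 asks for the resend timer to be set to resend_at, the earliest
 * expiry time seen in the window.
 */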

/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
                             unsigned long resend_at)
{
        read_lock_bh(&call->state_lock);
        if (call->state >= RXRPC_CALL_COMPLETE)
                resend = 0;

        if (resend & 1) {
                _debug("SET RESEND");
                set_bit(RXRPC_CALL_RESEND, &call->events);
        }

        if (resend & 2) {
                _debug("MODIFY RESEND TIMER");
                set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
                mod_timer(&call->resend_timer, resend_at);
        } else {
                _debug("KILL RESEND TIMER");
                del_timer_sync(&call->resend_timer);
                clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        }
        read_unlock_bh(&call->state_lock);
}
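
/*
 * The Tx window (call->acks_window) is a circular buffer of unsigned longs,
 * each slot holding the pointer to a transmitted packet's sk_buff.  As the
 * pointers are at least two-byte aligned, bit 0 of each slot is borrowed as
 * a "soft-ACK'd by the peer" flag, which is why the scans here mask with ~1
 * before dereferencing.
 */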

/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct rxrpc_header *hdr;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        bool stop;
        int loop;
        u8 resend;

        _enter("{%d,%d,%d,%d},",
               call->acks_hard, call->acks_unacked,
               atomic_read(&call->sequence),
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

        stop = false;
        resend = 0;
        resend_at = 0;

        /* scan the unacked part of the window; stop transmitting (but finish
         * the current slot's timer bookkeeping) once a send fails */
        for (loop = call->acks_tail;
             loop != call->acks_head && !stop;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                if (*p_txb & 1)
                        continue;

                txb = (struct sk_buff *) *p_txb;
                sp = rxrpc_skb(txb);

                if (sp->need_resend) {
                        sp->need_resend = false;

                        /* each Tx packet has a new serial number */
                        sp->hdr.serial =
                                htonl(atomic_inc_return(&call->conn->serial));

                        hdr = (struct rxrpc_header *) txb->head;
                        hdr->serial = sp->hdr.serial;

                        _proto("Tx DATA %%%u { #%d }",
                               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
                        if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
                                stop = true;
                                sp->resend_at = jiffies + 3;
                        } else {
                                sp->resend_at =
                                        jiffies + rxrpc_resend_timeout;
                        }
                }

                if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave("");
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        int loop;
        u8 resend;

        _enter("%d,%d,%d",
               call->acks_tail, call->acks_unacked, call->acks_head);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        resend = 0;
        resend_at = 0;

        for (loop = call->acks_unacked;
             loop != call->acks_head;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                ASSERT(!(*p_txb & 1));

                if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave("");
}
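
/*
 * On the wire, a soft-ACKing packet carries an array of ack->nAcks one-byte
 * dispositions straight after the rxrpc_ackpacket header (which the caller
 * has already pulled off the skb), one byte per packet starting at
 * firstPacket: RXRPC_ACK_TYPE_ACK for "received", RXRPC_ACK_TYPE_NACK for
 * "not yet received".
 */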

/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
                                   struct rxrpc_ackpacket *ack,
                                   struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        int loop;
        u8 sacks[RXRPC_MAXACKS], resend;

        _enter("{%d,%d},{%d},",
               call->acks_hard,
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
               ack->nAcks);

        if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
                goto protocol_error;

        resend = 0;
        resend_at = 0;
        for (loop = 0; loop < ack->nAcks; loop++) {
                p_txb = call->acks_window;
                p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                switch (sacks[loop]) {
                case RXRPC_ACK_TYPE_ACK:
                        sp->need_resend = false;
                        *p_txb |= 1;
                        break;
                case RXRPC_ACK_TYPE_NACK:
                        sp->need_resend = true;
                        *p_txb &= ~1;
                        resend = 1;
                        break;
                default:
                        _debug("Unsupported ACK type %d", sacks[loop]);
                        goto protocol_error;
                }
        }

        smp_mb();
        call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

        /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
         * have been received or processed yet by the far end */
        for (loop = call->acks_unacked;
             loop != call->acks_head;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                if (*p_txb & 1) {
                        /* packet must have been discarded */
                        sp->need_resend = true;
                        *p_txb &= ~1;
                        resend |= 1;
                } else if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = true;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave(" = 0");
        return 0;

protocol_error:
        _leave(" = -EPROTO");
        return -EPROTO;
}
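
/*
 * Hard ACKs, by contrast, mean the far end has consumed the data: those
 * packets can never be re-requested, so their skbs can be freed and their
 * window slots reused for new transmissions.
 */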

/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
        unsigned long _skb;
        int tail = call->acks_tail, old_tail;
        int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

        _enter("{%u,%u},%u", call->acks_hard, win, hard);

        ASSERTCMP(hard - call->acks_hard, <=, win);

        while (call->acks_hard < hard) {
                smp_read_barrier_depends();
                _skb = call->acks_window[tail] & ~1;
                rxrpc_free_skb((struct sk_buff *) _skb);
                old_tail = tail;
                tail = (tail + 1) & (call->acks_winsz - 1);
                call->acks_tail = tail;
                if (call->acks_unacked == old_tail)
                        call->acks_unacked = tail;
                call->acks_hard++;
        }

        wake_up(&call->tx_waitq);
}

/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
        rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}

/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        bool terminal;
        int ret;

        _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

        spin_lock_bh(&call->lock);

        ret = -ECONNRESET;
        if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
                goto socket_unavailable;

        skb = skb_dequeue(&call->rx_oos_queue);
        if (skb) {
                sp = rxrpc_skb(skb);

                _debug("drain OOS packet %d [%d]",
                       ntohl(sp->hdr.seq), call->rx_first_oos);

                if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
                        skb_queue_head(&call->rx_oos_queue, skb);
                        call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
                        _debug("requeue %p {%u}", skb, call->rx_first_oos);
                } else {
                        skb->mark = RXRPC_SKB_MARK_DATA;
                        terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
                                    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
                        ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
                        BUG_ON(ret < 0);
                        _debug("drain #%u", call->rx_data_post);
                        call->rx_data_post++;

                        /* find out what the next packet is */
                        skb = skb_peek(&call->rx_oos_queue);
                        if (skb)
                                call->rx_first_oos =
                                        ntohl(rxrpc_skb(skb)->hdr.seq);
                        else
                                call->rx_first_oos = 0;
                        _debug("peek %p {%u}", skb, call->rx_first_oos);
                }
        }

        ret = 0;
socket_unavailable:
        spin_unlock_bh(&call->lock);
        _leave(" = %d", ret);
        return ret;
}
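
/*
 * call->rx_first_oos caches the lowest sequence number currently held in
 * the out-of-sequence queue (0 if the queue is empty); draining proceeds
 * only while it matches rx_data_post, the next sequence number expected.
 */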

/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
                                    struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp, *psp;
        struct sk_buff *p;
        u32 seq;

        sp = rxrpc_skb(skb);
        seq = ntohl(sp->hdr.seq);
        _enter(",,{%u}", seq);

        skb->destructor = rxrpc_packet_destructor;
        ASSERTCMP(sp->call, ==, NULL);
        sp->call = call;
        rxrpc_get_call(call);

        /* insert into the buffer in sequence order */
        spin_lock_bh(&call->lock);

        skb_queue_walk(&call->rx_oos_queue, p) {
                psp = rxrpc_skb(p);
                if (ntohl(psp->hdr.seq) > seq) {
                        _debug("insert oos #%u before #%u",
                               seq, ntohl(psp->hdr.seq));
                        skb_insert(p, skb, &call->rx_oos_queue);
                        goto inserted;
                }
        }

        _debug("append oos #%u", seq);
        skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

        /* we might now have a new front to the queue */
        if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
                call->rx_first_oos = seq;

        read_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            call->rx_data_post == call->rx_first_oos) {
                _debug("drain rx oos now");
                set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
        }
        read_unlock(&call->state_lock);

        spin_unlock_bh(&call->lock);
        _leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        unsigned long _skb, *acks_window;
        u8 winsz = call->acks_winsz;
        int tail;

        acks_window = call->acks_window;
        call->acks_window = NULL;

        while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
                tail = call->acks_tail;
                smp_read_barrier_depends();
                _skb = acks_window[tail] & ~1;
                smp_mb();
                call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

                skb = (struct sk_buff *) _skb;
                sp = rxrpc_skb(skb);
                _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
                rxrpc_free_skb(skb);
        }

        kfree(acks_window);
}
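
/*
 * An ACK packet's payload, as parsed here and as built in
 * rxrpc_process_call(), is laid out as the rxrpc_ackpacket header, then
 * nAcks soft-ACK bytes, then 3 bytes of padding, then the optional
 * rxrpc_ackinfo trailer; hence the "nAcks + 3" offset below on an skb
 * already pulled past the header.
 */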

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
                                  unsigned int latest, int nAcks)
{
        struct rxrpc_ackinfo ackinfo;
        struct rxrpc_peer *peer;
        unsigned int mtu;

        if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
                _leave(" [no ackinfo]");
                return;
        }

        _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
               latest,
               ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
               ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

        mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

        peer = call->conn->trans->peer;
        if (mtu < peer->maxdata) {
                spin_lock_bh(&peer->lock);
                peer->maxdata = mtu;
                peer->mtu = mtu + peer->hdrsize;
                spin_unlock_bh(&peer->lock);
                _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
        }
}
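
/*
 * Numbering convention for the ACK parsing below: ack.firstPacket ("hard")
 * is the first packet the peer has NOT yet hard-ACK'd, so packets up to and
 * including hard - 1 may be discarded from the Tx window, while the nAcks
 * soft-ACK bytes describe packets from hard onwards.
 */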

/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
                                  u32 *_abort_code)
{
        struct rxrpc_ackpacket ack;
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        bool post_ACK;
        int latest;
        u32 hard, tx;

        _enter("");

process_further:
        skb = skb_dequeue(&call->rx_queue);
        if (!skb)
                return -EAGAIN;

        _net("deferred skb %p", skb);

        sp = rxrpc_skb(skb);

        _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

        post_ACK = false;

        switch (sp->hdr.type) {
                /* data packets that wind up here have been received out of
                 * order, need security processing or are jumbo packets */
        case RXRPC_PACKET_TYPE_DATA:
                _proto("OOSQ DATA %%%u { #%u }",
                       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

                /* secured packets must be verified and possibly decrypted */
                if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
                        goto protocol_error;

                rxrpc_insert_oos_packet(call, skb);
                goto process_further;

                /* partial ACK to process */
        case RXRPC_PACKET_TYPE_ACK:
                if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
                        _debug("extraction failure");
                        goto protocol_error;
                }
                if (!skb_pull(skb, sizeof(ack)))
                        BUG();

                latest = ntohl(sp->hdr.serial);
                hard = ntohl(ack.firstPacket);
                tx = atomic_read(&call->sequence);

                _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
                       latest,
                       ntohs(ack.maxSkew),
                       hard,
                       ntohl(ack.previousPacket),
                       ntohl(ack.serial),
                       rxrpc_acks(ack.reason),
                       ack.nAcks);

                rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

                if (ack.reason == RXRPC_ACK_PING) {
                        _proto("Rx ACK %%%u PING Request", latest);
                        rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
                                          sp->hdr.serial, true);
                }

                /* discard any out-of-order or duplicate ACKs */
                if (latest - call->acks_latest <= 0) {
                        _debug("discard ACK %d <= %d",
                               latest, call->acks_latest);
                        goto discard;
                }
                call->acks_latest = latest;

                if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
                    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
                    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
                    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
                        goto discard;

                _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

                if (hard > 0) {
                        if (hard - 1 > tx) {
                                _debug("hard-ACK'd packet %d not transmitted"
                                       " (%d top)",
                                       hard - 1, tx);
                                goto protocol_error;
                        }

                        if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
                             call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
                            hard > tx)
                                goto all_acked;

                        smp_rmb();
                        rxrpc_rotate_tx_window(call, hard - 1);
                }

                if (ack.nAcks > 0) {
                        if (hard - 1 + ack.nAcks > tx) {
                                _debug("soft-ACK'd packet %d+%d not"
                                       " transmitted (%d top)",
                                       hard - 1, ack.nAcks, tx);
                                goto protocol_error;
                        }

                        if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
                                goto protocol_error;
                }
                goto discard;

                /* complete ACK to process */
        case RXRPC_PACKET_TYPE_ACKALL:
                goto all_acked;

                /* abort and busy are handled elsewhere */
        case RXRPC_PACKET_TYPE_BUSY:
        case RXRPC_PACKET_TYPE_ABORT:
                BUG();

                /* connection level events - also handled elsewhere */
        case RXRPC_PACKET_TYPE_CHALLENGE:
        case RXRPC_PACKET_TYPE_RESPONSE:
        case RXRPC_PACKET_TYPE_DEBUG:
                BUG();
        }

        /* if we've had a hard ACK that covers all the packets we've sent, then
         * that ends that phase of the operation */
all_acked:
        write_lock_bh(&call->state_lock);
        _debug("ack all %d", call->state);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
                break;
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                _debug("srv complete");
                call->state = RXRPC_CALL_COMPLETE;
                post_ACK = true;
                break;
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
                goto protocol_error_unlock; /* can't occur yet */
        default:
                write_unlock_bh(&call->state_lock);
                goto discard; /* assume packet left over from earlier phase */
        }

        write_unlock_bh(&call->state_lock);

        /* if all the packets we sent are hard-ACK'd, then we can discard
         * whatever we've got left */
        _debug("clear Tx %d",
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

        del_timer_sync(&call->resend_timer);
        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);

        if (call->acks_window)
                rxrpc_zap_tx_window(call);

        if (post_ACK) {
                /* post the final ACK message for userspace to pick up */
                _debug("post ACK");
                skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
                sp->call = call;
                rxrpc_get_call(call);
                spin_lock_bh(&call->lock);
                if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
                        BUG();
                spin_unlock_bh(&call->lock);
                goto process_further;
        }

discard:
        rxrpc_free_skb(skb);
        goto process_further;

protocol_error_unlock:
        write_unlock_bh(&call->state_lock);
protocol_error:
        rxrpc_free_skb(skb);
        _leave(" = -EPROTO");
        return -EPROTO;
}

/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
                              bool fatal)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        int ret;

        _enter("{%d,%lx},%u,%u,%d",
               call->debug_id, call->flags, mark, error, fatal);

        /* remove timers and things for fatal messages */
        if (fatal) {
                del_timer_sync(&call->resend_timer);
                del_timer_sync(&call->ack_timer);
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        }

        if (mark != RXRPC_SKB_MARK_NEW_CALL &&
            !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                _leave("[no userid]");
                return 0;
        }

        if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
                skb = alloc_skb(0, GFP_NOFS);
                if (!skb)
                        return -ENOMEM;

                rxrpc_new_skb(skb);

                skb->mark = mark;

                sp = rxrpc_skb(skb);
                memset(sp, 0, sizeof(*sp));
                sp->error = error;
                sp->call = call;
                rxrpc_get_call(call);

                spin_lock_bh(&call->lock);
                ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
                spin_unlock_bh(&call->lock);
                BUG_ON(ret < 0);
        }

        return 0;
}
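
/*
 * This is the call's work item.  Events are handled roughly in order of
 * severity: release, errors and aborts, the final ACK, timeouts, then
 * deferred packets, resends and ordinary ACKs.  At most one packet is
 * transmitted per pass, and the item requeues itself while events remain
 * outstanding.
 */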

/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
        struct rxrpc_ackpacket ack;
        struct rxrpc_ackinfo ackinfo;
        struct rxrpc_header hdr;
        struct msghdr msg;
        struct kvec iov[5];
        unsigned long bits;
        __be32 data, pad;
        size_t len;
        int genbit, loop, nbit, ioc, ret, mtu;
        u32 abort_code = RX_PROTOCOL_ERROR;
        u8 *acks = NULL;

        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx} [%lu]",
               call->debug_id, rxrpc_call_states[call->state], call->events,
               (jiffies - call->creation_jif) / (HZ / 10));

        if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
                _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
                return;
        }

        /* there's a good chance we're going to have to send a message, so set
         * one up in advance */
        msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
        msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        hdr.epoch = call->conn->epoch;
        hdr.cid = call->cid;
        hdr.callNumber = call->call_id;
        hdr.seq = 0;
        hdr.type = RXRPC_PACKET_TYPE_ACK;
        hdr.flags = call->conn->out_clientflag;
        hdr.userStatus = 0;
        hdr.securityIndex = call->conn->security_ix;
        hdr._rsvd = 0;
        hdr.serviceId = call->conn->service_id;

        memset(iov, 0, sizeof(iov));
        iov[0].iov_base = &hdr;
        iov[0].iov_len = sizeof(hdr);

        /* deal with events of a final nature */
        if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
                rxrpc_release_call(call);
                clear_bit(RXRPC_CALL_RELEASE, &call->events);
        }

        if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
                int error;

                clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
                clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
                clear_bit(RXRPC_CALL_ABORT, &call->events);

                error = call->conn->trans->peer->net_error;
                _debug("post net error %d", error);

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
                                       error, true) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
                goto kill_ACKs;
        }

        if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
                ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

                clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
                clear_bit(RXRPC_CALL_ABORT, &call->events);

                _debug("post conn abort");

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       call->conn->error, true) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
                goto kill_ACKs;
        }

        if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
                hdr.type = RXRPC_PACKET_TYPE_BUSY;
                genbit = RXRPC_CALL_REJECT_BUSY;
                goto send_message;
        }

        if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
                ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       ECONNABORTED, true) < 0)
                        goto no_mem;
                hdr.type = RXRPC_PACKET_TYPE_ABORT;
                data = htonl(call->abort_code);
                iov[1].iov_base = &data;
                iov[1].iov_len = sizeof(data);
                genbit = RXRPC_CALL_ABORT;
                goto send_message;
        }

        if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
                genbit = RXRPC_CALL_ACK_FINAL;

                ack.bufferSpace = htons(8);
                ack.maxSkew = 0;
                ack.serial = 0;
                ack.reason = RXRPC_ACK_IDLE;
                ack.nAcks = 0;
                call->ackr_reason = 0;

                spin_lock_bh(&call->lock);
                ack.serial = call->ackr_serial;
                ack.previousPacket = call->ackr_prev_seq;
                ack.firstPacket = htonl(call->rx_data_eaten + 1);
                spin_unlock_bh(&call->lock);

                pad = 0;

                iov[1].iov_base = &ack;
                iov[1].iov_len = sizeof(ack);
                iov[2].iov_base = &pad;
                iov[2].iov_len = 3;
                iov[3].iov_base = &ackinfo;
                iov[3].iov_len = sizeof(ackinfo);
                goto send_ACK;
        }

        if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
                            (1 << RXRPC_CALL_RCVD_ABORT))
            ) {
                u32 mark;

                if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
                        mark = RXRPC_SKB_MARK_REMOTE_ABORT;
                else
                        mark = RXRPC_SKB_MARK_BUSY;

                _debug("post abort/busy");
                rxrpc_clear_tx_window(call);
                if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
                        goto no_mem;

                clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
                clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
                goto kill_ACKs;
        }

        if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
                _debug("do implicit ackall");
                rxrpc_clear_tx_window(call);
        }

        if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
                write_lock_bh(&call->state_lock);
                if (call->state <= RXRPC_CALL_COMPLETE) {
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->abort_code = RX_CALL_TIMEOUT;
                        set_bit(RXRPC_CALL_ABORT, &call->events);
                }
                write_unlock_bh(&call->state_lock);

                _debug("post timeout");
                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       ETIME, true) < 0)
                        goto no_mem;

                clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
                goto kill_ACKs;
        }

        /* deal with assorted inbound messages */
        if (!skb_queue_empty(&call->rx_queue)) {
                switch (rxrpc_process_rx_queue(call, &abort_code)) {
                case 0:
                case -EAGAIN:
                        break;
                case -ENOMEM:
                        goto no_mem;
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EPROTO:
                        rxrpc_abort_call(call, abort_code);
                        goto kill_ACKs;
                }
        }

        /* handle resending */
        if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
                rxrpc_resend_timer(call);
        if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
                rxrpc_resend(call);

        /* consider sending an ordinary ACK */
        if (test_bit(RXRPC_CALL_ACK, &call->events)) {
                _debug("send ACK: window: %d - %d { %lx }",
                       call->rx_data_eaten, call->ackr_win_top,
                       call->ackr_window[0]);

                if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
                    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
                        /* ACK by sending reply DATA packet in this state */
                        clear_bit(RXRPC_CALL_ACK, &call->events);
                        goto maybe_reschedule;
                }

                genbit = RXRPC_CALL_ACK;

                acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
                               GFP_NOFS);
                if (!acks)
                        goto no_mem;

                //hdr.flags = RXRPC_SLOW_START_OK;
                ack.bufferSpace = htons(8);
                ack.maxSkew = 0;
                ack.serial = 0;
                ack.reason = 0;

                spin_lock_bh(&call->lock);
                ack.reason = call->ackr_reason;
                ack.serial = call->ackr_serial;
                ack.previousPacket = call->ackr_prev_seq;
                ack.firstPacket = htonl(call->rx_data_eaten + 1);

                ack.nAcks = 0;
                for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
                        nbit = loop * BITS_PER_LONG;
                        for (bits = call->ackr_window[loop]; bits; bits >>= 1) {
                                _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
                                if (bits & 1) {
                                        acks[nbit] = RXRPC_ACK_TYPE_ACK;
                                        ack.nAcks = nbit + 1;
                                }
                                nbit++;
                        }
                }
                call->ackr_reason = 0;
                spin_unlock_bh(&call->lock);

                pad = 0;

                iov[1].iov_base = &ack;
                iov[1].iov_len = sizeof(ack);
                iov[2].iov_base = acks;
                iov[2].iov_len = ack.nAcks;
                iov[3].iov_base = &pad;
                iov[3].iov_len = 3;
                iov[4].iov_base = &ackinfo;
                iov[4].iov_len = sizeof(ackinfo);

                switch (ack.reason) {
                case RXRPC_ACK_REQUESTED:
                case RXRPC_ACK_DUPLICATE:
                case RXRPC_ACK_OUT_OF_SEQUENCE:
                case RXRPC_ACK_EXCEEDS_WINDOW:
                case RXRPC_ACK_NOSPACE:
                case RXRPC_ACK_PING:
                case RXRPC_ACK_PING_RESPONSE:
                        goto send_ACK_with_skew;
                case RXRPC_ACK_DELAY:
                case RXRPC_ACK_IDLE:
                        goto send_ACK;
                }
        }

        /* handle completion of security negotiations on an incoming
         * connection */
        if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
                _debug("secured");
                spin_lock_bh(&call->lock);

                if (call->state == RXRPC_CALL_SERVER_SECURING) {
                        _debug("securing");
                        write_lock(&call->conn->lock);
                        if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
                            !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
                                _debug("not released");
                                call->state = RXRPC_CALL_SERVER_ACCEPTING;
                                list_move_tail(&call->accept_link,
                                               &call->socket->acceptq);
                        }
                        write_unlock(&call->conn->lock);
                        read_lock(&call->state_lock);
                        if (call->state < RXRPC_CALL_COMPLETE)
                                set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
                        read_unlock(&call->state_lock);
                }

                spin_unlock_bh(&call->lock);
                if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
                        goto maybe_reschedule;
        }

        /* post a notification of an acceptable connection to the app */
        if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
                _debug("post accept");
                if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
                                       0, false) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
                goto maybe_reschedule;
        }

        /* handle incoming call acceptance */
        if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
                _debug("accepted");
                ASSERTCMP(call->rx_data_post, ==, 0);
                call->rx_data_post = 1;
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE)
                        set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
                read_unlock_bh(&call->state_lock);
        }

        /* drain the out of sequence received packet queue into the packet Rx
         * queue */
        if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
                while (call->rx_data_post == call->rx_first_oos)
                        if (rxrpc_drain_rx_oos_queue(call) < 0)
                                break;
                goto maybe_reschedule;
        }

        /* other events may have been raised since we started checking */
        goto maybe_reschedule;

send_ACK_with_skew:
        ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
                            ntohl(ack.serial));
send_ACK:
        mtu = call->conn->trans->peer->if_mtu;
        mtu -= call->conn->trans->peer->hdrsize;
        ackinfo.maxMTU = htonl(mtu);
        ackinfo.rwind = htonl(rxrpc_rx_window_size);

        /* permit the peer to send us jumbo packets if it wants to */
        ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
        ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);

        hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
        _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
               ntohl(hdr.serial),
               ntohs(ack.maxSkew),
               ntohl(ack.firstPacket),
               ntohl(ack.previousPacket),
               ntohl(ack.serial),
               rxrpc_acks(ack.reason),
               ack.nAcks);

        del_timer_sync(&call->ack_timer);
        if (ack.nAcks > 0)
                set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
        goto send_message_2;

send_message:
        _debug("send message");

        hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
        _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
send_message_2:

        len = iov[0].iov_len;
        ioc = 1;
        if (iov[4].iov_len) {
                ioc = 5;
                len += iov[4].iov_len;
                len += iov[3].iov_len;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[3].iov_len) {
                ioc = 4;
                len += iov[3].iov_len;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[2].iov_len) {
                ioc = 3;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[1].iov_len) {
                ioc = 2;
                len += iov[1].iov_len;
        }

        ret = kernel_sendmsg(call->conn->trans->local->socket,
                             &msg, iov, ioc, len);
        if (ret < 0) {
                _debug("sendmsg failed: %d", ret);
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_DEAD)
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
                goto error;
        }

        switch (genbit) {
        case RXRPC_CALL_ABORT:
                clear_bit(genbit, &call->events);
                clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
                goto kill_ACKs;

        case RXRPC_CALL_ACK_FINAL:
                write_lock_bh(&call->state_lock);
                if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
                        call->state = RXRPC_CALL_COMPLETE;
                write_unlock_bh(&call->state_lock);
                goto kill_ACKs;

        default:
                clear_bit(genbit, &call->events);
                switch (call->state) {
                case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                case RXRPC_CALL_CLIENT_RECV_REPLY:
                case RXRPC_CALL_SERVER_RECV_REQUEST:
                case RXRPC_CALL_SERVER_ACK_REQUEST:
                        _debug("start ACK timer");
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
                                          call->ackr_serial, false);
                default:
                        break;
                }
                goto maybe_reschedule;
        }

kill_ACKs:
        del_timer_sync(&call->ack_timer);
        if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
                rxrpc_put_call(call);
        clear_bit(RXRPC_CALL_ACK, &call->events);

maybe_reschedule:
        if (call->events || !skb_queue_empty(&call->rx_queue)) {
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_DEAD)
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
        }

        /* don't leave aborted connections on the accept queue */
        if (call->state >= RXRPC_CALL_COMPLETE &&
            !list_empty(&call->accept_link)) {
                _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
                       call, call->events, call->flags,
                       ntohl(call->conn->cid));

                read_lock_bh(&call->state_lock);
                if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
                    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
        }

error:
        clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
        kfree(acks);

        /* because we don't want two CPUs both processing the work item for one
         * call at the same time, we use a flag to note when it's busy; however
         * this means there's a race between clearing the flag and setting the
         * work pending bit and the work item being processed again */
        if (call->events && !work_pending(&call->processor)) {
                _debug("jumpstart %x", ntohl(call->conn->cid));
                rxrpc_queue_call(call);
        }

        _leave("");
        return;

no_mem:
        _debug("out of memory");
        goto maybe_reschedule;
}