ar-ack.c

  1. /* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/circ_buf.h>
  13. #include <linux/net.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/slab.h>
  16. #include <linux/udp.h>
  17. #include <net/sock.h>
  18. #include <net/af_rxrpc.h>
  19. #include "ar-internal.h"
  20. /*
  21. * How long to wait before scheduling ACK generation after seeing a
  22. * packet with RXRPC_REQUEST_ACK set (in jiffies).
  23. */
  24. unsigned rxrpc_requested_ack_delay = 1;
  25. /*
  26. * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
  27. *
  28. * We use this when we've received new data packets. If those packets aren't
  29. * all consumed within this time we will send a DELAY ACK (if an ACK was not
  30. * otherwise requested) to let the sender know it doesn't need to resend.
  31. */
  32. unsigned rxrpc_soft_ack_delay = 1 * HZ;
  33. /*
  34. * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
  35. *
  36. * We use this when we've consumed some previously soft-ACK'd packets and
  37. * further packets aren't immediately arriving, to decide when to send an IDLE
  38. * ACK to let the other end know that it can free up its Tx buffer space.
  39. */
  40. unsigned rxrpc_idle_ack_delay = 0.5 * HZ;
  41. /*
  42. * Receive window size in packets. This indicates the maximum number of
  43. * unconsumed received packets we're willing to retain in memory. Once this
  44. * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
  45. * packets.
  46. */
  47. unsigned rxrpc_rx_window_size = 32;
  48. /*
  49. * Maximum Rx MTU size. This indicates to the sender the largest jumbo packet,
  50. * made by gluing normal packets together, that we're willing to handle.
  51. */
  52. unsigned rxrpc_rx_mtu = 5692;
  53. /*
  54. * The maximum number of fragments in a received jumbo packet that we tell the
  55. * sender that we're willing to handle.
  56. */
  57. unsigned rxrpc_rx_jumbo_max = 4;
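/* Note: the three Rx limits above (window size, Rx MTU and jumbo max) are the
 * values advertised to the peer in the ackinfo trailer of outgoing ACK
 * packets; see the send_ACK path in rxrpc_process_call() below.
 */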
  58. static const char *rxrpc_acks(u8 reason)
  59. {
  60. static const char *const str[] = {
  61. "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
  62. "IDL", "-?-"
  63. };
  64. if (reason >= ARRAY_SIZE(str))
  65. reason = ARRAY_SIZE(str) - 1;
  66. return str[reason];
  67. }
  68. static const s8 rxrpc_ack_priority[] = {
  69. [0] = 0,
  70. [RXRPC_ACK_DELAY] = 1,
  71. [RXRPC_ACK_REQUESTED] = 2,
  72. [RXRPC_ACK_IDLE] = 3,
  73. [RXRPC_ACK_PING_RESPONSE] = 4,
  74. [RXRPC_ACK_DUPLICATE] = 5,
  75. [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
  76. [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
  77. [RXRPC_ACK_NOSPACE] = 8,
  78. };
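/* Note: larger values in this table mean higher priority.  In
 * __rxrpc_propose_ACK() below, a lower-priority proposal is ignored, an
 * equal-priority one only refreshes the recorded serial number, and only a
 * higher-priority reason replaces the ACK already pending in
 * call->ackr_reason.
 */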
  79. /*
  80. * propose an ACK be sent
  81. */
  82. void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
  83. __be32 serial, bool immediate)
  84. {
  85. unsigned long expiry;
  86. s8 prior = rxrpc_ack_priority[ack_reason];
  87. ASSERTCMP(prior, >, 0);
  88. _enter("{%d},%s,%%%x,%u",
  89. call->debug_id, rxrpc_acks(ack_reason), ntohl(serial),
  90. immediate);
  91. if (prior < rxrpc_ack_priority[call->ackr_reason]) {
  92. if (immediate)
  93. goto cancel_timer;
  94. return;
  95. }
  96. /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
  97. * numbers */
  98. if (prior == rxrpc_ack_priority[call->ackr_reason]) {
  99. if (prior <= 4)
  100. call->ackr_serial = serial;
  101. if (immediate)
  102. goto cancel_timer;
  103. return;
  104. }
  105. call->ackr_reason = ack_reason;
  106. call->ackr_serial = serial;
  107. switch (ack_reason) {
  108. case RXRPC_ACK_DELAY:
  109. _debug("run delay timer");
  110. expiry = rxrpc_soft_ack_delay;
  111. goto run_timer;
  112. case RXRPC_ACK_IDLE:
  113. if (!immediate) {
  114. _debug("run defer timer");
  115. expiry = rxrpc_idle_ack_delay;
  116. goto run_timer;
  117. }
  118. goto cancel_timer;
  119. case RXRPC_ACK_REQUESTED:
  120. expiry = rxrpc_requested_ack_delay;
  121. if (!expiry)
  122. goto cancel_timer;
  123. if (!immediate || serial == cpu_to_be32(1)) {
  124. _debug("run defer timer");
  125. goto run_timer;
  126. }
  127. default:
  128. _debug("immediate ACK");
  129. goto cancel_timer;
  130. }
  131. run_timer:
  132. expiry += jiffies;
  133. if (!timer_pending(&call->ack_timer) ||
  134. time_after(call->ack_timer.expires, expiry))
  135. mod_timer(&call->ack_timer, expiry);
  136. return;
  137. cancel_timer:
  138. _debug("cancel timer %%%u", ntohl(serial));
  139. try_to_del_timer_sync(&call->ack_timer);
  140. read_lock_bh(&call->state_lock);
  141. if (call->state <= RXRPC_CALL_COMPLETE &&
  142. !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
  143. rxrpc_queue_call(call);
  144. read_unlock_bh(&call->state_lock);
  145. }
  146. /*
  147. * propose an ACK be sent, locking the call structure
  148. */
  149. void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
  150. __be32 serial, bool immediate)
  151. {
  152. s8 prior = rxrpc_ack_priority[ack_reason];
  153. if (prior > rxrpc_ack_priority[call->ackr_reason]) {
  154. spin_lock_bh(&call->lock);
  155. __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
  156. spin_unlock_bh(&call->lock);
  157. }
  158. }
  159. /*
  160. * set the resend timer
  161. */
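/* The 'resend' argument is a two-bit mask: bit 0 asks for the
 * RXRPC_CALL_RESEND event to be raised so packets get retransmitted, and
 * bit 1 asks for the resend timer to be (re)armed to expire at 'resend_at'
 * (when bit 1 is clear the timer is deleted instead).
 */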
  162. static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
  163. unsigned long resend_at)
  164. {
  165. read_lock_bh(&call->state_lock);
  166. if (call->state >= RXRPC_CALL_COMPLETE)
  167. resend = 0;
  168. if (resend & 1) {
  169. _debug("SET RESEND");
  170. set_bit(RXRPC_CALL_RESEND, &call->events);
  171. }
  172. if (resend & 2) {
  173. _debug("MODIFY RESEND TIMER");
  174. set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
  175. mod_timer(&call->resend_timer, resend_at);
  176. } else {
  177. _debug("KILL RESEND TIMER");
  178. del_timer_sync(&call->resend_timer);
  179. clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
  180. clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
  181. }
  182. read_unlock_bh(&call->state_lock);
  183. }
  184. /*
  185. * resend packets
  186. */
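/* Note on the Tx bookkeeping used below: call->acks_window[] is a
 * power-of-two-sized circular buffer of sk_buff pointers, indexed by
 * acks_tail, acks_unacked and acks_head.  Bit 0 of each slot doubles as a
 * "soft-ACK'd by the peer" flag, which is why slots are masked with ~1
 * before being used as pointers and are skipped when the bit is set.
 */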
  187. static void rxrpc_resend(struct rxrpc_call *call)
  188. {
  189. struct rxrpc_skb_priv *sp;
  190. struct rxrpc_header *hdr;
  191. struct sk_buff *txb;
  192. unsigned long *p_txb, resend_at;
  193. int loop, stop;
  194. u8 resend;
  195. _enter("{%d,%d,%d,%d},",
  196. call->acks_hard, call->acks_unacked,
  197. atomic_read(&call->sequence),
  198. CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
  199. stop = 0;
  200. resend = 0;
  201. resend_at = 0;
  202. for (loop = call->acks_tail;
  203. loop != call->acks_head && !stop;
  204. loop = (loop + 1) & (call->acks_winsz - 1)
  205. ) {
  206. p_txb = call->acks_window + loop;
  207. smp_read_barrier_depends();
  208. if (*p_txb & 1)
  209. continue;
  210. txb = (struct sk_buff *) *p_txb;
  211. sp = rxrpc_skb(txb);
  212. if (sp->need_resend) {
  213. sp->need_resend = false;
  214. /* each Tx packet has a new serial number */
  215. sp->hdr.serial =
  216. htonl(atomic_inc_return(&call->conn->serial));
  217. hdr = (struct rxrpc_header *) txb->head;
  218. hdr->serial = sp->hdr.serial;
  219. _proto("Tx DATA %%%u { #%d }",
  220. ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
  221. if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
  222. stop = 1;
  223. sp->resend_at = jiffies + 3;
  224. } else {
  225. sp->resend_at =
  226. jiffies + rxrpc_resend_timeout * HZ;
  227. }
  228. }
  229. if (time_after_eq(jiffies + 1, sp->resend_at)) {
  230. sp->need_resend = true;
  231. resend |= 1;
  232. } else if (resend & 2) {
  233. if (time_before(sp->resend_at, resend_at))
  234. resend_at = sp->resend_at;
  235. } else {
  236. resend_at = sp->resend_at;
  237. resend |= 2;
  238. }
  239. }
  240. rxrpc_set_resend(call, resend, resend_at);
  241. _leave("");
  242. }
  243. /*
  244. * handle resend timer expiry
  245. */
  246. static void rxrpc_resend_timer(struct rxrpc_call *call)
  247. {
  248. struct rxrpc_skb_priv *sp;
  249. struct sk_buff *txb;
  250. unsigned long *p_txb, resend_at;
  251. int loop;
  252. u8 resend;
  253. _enter("%d,%d,%d",
  254. call->acks_tail, call->acks_unacked, call->acks_head);
  255. if (call->state >= RXRPC_CALL_COMPLETE)
  256. return;
  257. resend = 0;
  258. resend_at = 0;
  259. for (loop = call->acks_unacked;
  260. loop != call->acks_head;
  261. loop = (loop + 1) & (call->acks_winsz - 1)
  262. ) {
  263. p_txb = call->acks_window + loop;
  264. smp_read_barrier_depends();
  265. txb = (struct sk_buff *) (*p_txb & ~1);
  266. sp = rxrpc_skb(txb);
  267. ASSERT(!(*p_txb & 1));
  268. if (sp->need_resend) {
  269. ;
  270. } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
  271. sp->need_resend = true;
  272. resend |= 1;
  273. } else if (resend & 2) {
  274. if (time_before(sp->resend_at, resend_at))
  275. resend_at = sp->resend_at;
  276. } else {
  277. resend_at = sp->resend_at;
  278. resend |= 2;
  279. }
  280. }
  281. rxrpc_set_resend(call, resend, resend_at);
  282. _leave("");
  283. }
  284. /*
  285. * process soft ACKs of our transmitted packets
  286. * - these indicate packets the peer has or has not received, but hasn't yet
  287. * given to the consumer, and so can still be discarded and re-requested
  288. */
  289. static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
  290. struct rxrpc_ackpacket *ack,
  291. struct sk_buff *skb)
  292. {
  293. struct rxrpc_skb_priv *sp;
  294. struct sk_buff *txb;
  295. unsigned long *p_txb, resend_at;
  296. int loop;
  297. u8 sacks[RXRPC_MAXACKS], resend;
  298. _enter("{%d,%d},{%d},",
  299. call->acks_hard,
  300. CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
  301. ack->nAcks);
  302. if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
  303. goto protocol_error;
  304. resend = 0;
  305. resend_at = 0;
  306. for (loop = 0; loop < ack->nAcks; loop++) {
  307. p_txb = call->acks_window;
  308. p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
  309. smp_read_barrier_depends();
  310. txb = (struct sk_buff *) (*p_txb & ~1);
  311. sp = rxrpc_skb(txb);
  312. switch (sacks[loop]) {
  313. case RXRPC_ACK_TYPE_ACK:
  314. sp->need_resend = false;
  315. *p_txb |= 1;
  316. break;
  317. case RXRPC_ACK_TYPE_NACK:
  318. sp->need_resend = true;
  319. *p_txb &= ~1;
  320. resend = 1;
  321. break;
  322. default:
  323. _debug("Unsupported ACK type %d", sacks[loop]);
  324. goto protocol_error;
  325. }
  326. }
  327. smp_mb();
  328. call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
  329. /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
  330. * have been received or processed yet by the far end */
  331. for (loop = call->acks_unacked;
  332. loop != call->acks_head;
  333. loop = (loop + 1) & (call->acks_winsz - 1)
  334. ) {
  335. p_txb = call->acks_window + loop;
  336. smp_read_barrier_depends();
  337. txb = (struct sk_buff *) (*p_txb & ~1);
  338. sp = rxrpc_skb(txb);
  339. if (*p_txb & 1) {
  340. /* packet must have been discarded */
  341. sp->need_resend = true;
  342. *p_txb &= ~1;
  343. resend |= 1;
  344. } else if (sp->need_resend) {
  345. ;
  346. } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
  347. sp->need_resend = true;
  348. resend |= 1;
  349. } else if (resend & 2) {
  350. if (time_before(sp->resend_at, resend_at))
  351. resend_at = sp->resend_at;
  352. } else {
  353. resend_at = sp->resend_at;
  354. resend |= 2;
  355. }
  356. }
  357. rxrpc_set_resend(call, resend, resend_at);
  358. _leave(" = 0");
  359. return 0;
  360. protocol_error:
  361. _leave(" = -EPROTO");
  362. return -EPROTO;
  363. }
  364. /*
  365. * discard hard-ACK'd packets from the Tx window
  366. */
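/* Here 'hard' is the count of packets the peer has hard-ACK'd so far: skbs
 * are freed from the tail of the Tx window and acks_tail/acks_hard advanced
 * until acks_hard catches up, after which anything sleeping in the transmit
 * path waiting for window space is woken via tx_waitq.
 */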
  367. static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
  368. {
  369. unsigned long _skb;
  370. int tail = call->acks_tail, old_tail;
  371. int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
  372. _enter("{%u,%u},%u", call->acks_hard, win, hard);
  373. ASSERTCMP(hard - call->acks_hard, <=, win);
  374. while (call->acks_hard < hard) {
  375. smp_read_barrier_depends();
  376. _skb = call->acks_window[tail] & ~1;
  377. rxrpc_free_skb((struct sk_buff *) _skb);
  378. old_tail = tail;
  379. tail = (tail + 1) & (call->acks_winsz - 1);
  380. call->acks_tail = tail;
  381. if (call->acks_unacked == old_tail)
  382. call->acks_unacked = tail;
  383. call->acks_hard++;
  384. }
  385. wake_up(&call->tx_waitq);
  386. }
  387. /*
  388. * clear the Tx window in the event of a failure
  389. */
  390. static void rxrpc_clear_tx_window(struct rxrpc_call *call)
  391. {
  392. rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
  393. }
  394. /*
  395. * drain the out of sequence received packet queue into the packet Rx queue
  396. */
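/* Packets are shifted from rx_oos_queue to the main Rx queue one at a time,
 * but only while the head of the OOS queue carries the sequence number the
 * call expects to deliver next (rx_data_post); rx_first_oos tracks that head
 * sequence number and the caller loops on it.
 */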
  397. static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
  398. {
  399. struct rxrpc_skb_priv *sp;
  400. struct sk_buff *skb;
  401. bool terminal;
  402. int ret;
  403. _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
  404. spin_lock_bh(&call->lock);
  405. ret = -ECONNRESET;
  406. if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
  407. goto socket_unavailable;
  408. skb = skb_dequeue(&call->rx_oos_queue);
  409. if (skb) {
  410. sp = rxrpc_skb(skb);
  411. _debug("drain OOS packet %d [%d]",
  412. ntohl(sp->hdr.seq), call->rx_first_oos);
  413. if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
  414. skb_queue_head(&call->rx_oos_queue, skb);
  415. call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
  416. _debug("requeue %p {%u}", skb, call->rx_first_oos);
  417. } else {
  418. skb->mark = RXRPC_SKB_MARK_DATA;
  419. terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
  420. !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
  421. ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
  422. BUG_ON(ret < 0);
  423. _debug("drain #%u", call->rx_data_post);
  424. call->rx_data_post++;
  425. /* find out what the next packet is */
  426. skb = skb_peek(&call->rx_oos_queue);
  427. if (skb)
  428. call->rx_first_oos =
  429. ntohl(rxrpc_skb(skb)->hdr.seq);
  430. else
  431. call->rx_first_oos = 0;
  432. _debug("peek %p {%u}", skb, call->rx_first_oos);
  433. }
  434. }
  435. ret = 0;
  436. socket_unavailable:
  437. spin_unlock_bh(&call->lock);
  438. _leave(" = %d", ret);
  439. return ret;
  440. }
  441. /*
  442. * insert an out of sequence packet into the buffer
  443. */
  444. static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
  445. struct sk_buff *skb)
  446. {
  447. struct rxrpc_skb_priv *sp, *psp;
  448. struct sk_buff *p;
  449. u32 seq;
  450. sp = rxrpc_skb(skb);
  451. seq = ntohl(sp->hdr.seq);
  452. _enter(",,{%u}", seq);
  453. skb->destructor = rxrpc_packet_destructor;
  454. ASSERTCMP(sp->call, ==, NULL);
  455. sp->call = call;
  456. rxrpc_get_call(call);
  457. /* insert into the buffer in sequence order */
  458. spin_lock_bh(&call->lock);
  459. skb_queue_walk(&call->rx_oos_queue, p) {
  460. psp = rxrpc_skb(p);
  461. if (ntohl(psp->hdr.seq) > seq) {
  462. _debug("insert oos #%u before #%u",
  463. seq, ntohl(psp->hdr.seq));
  464. skb_insert(p, skb, &call->rx_oos_queue);
  465. goto inserted;
  466. }
  467. }
  468. _debug("append oos #%u", seq);
  469. skb_queue_tail(&call->rx_oos_queue, skb);
  470. inserted:
  471. /* we might now have a new front to the queue */
  472. if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
  473. call->rx_first_oos = seq;
  474. read_lock(&call->state_lock);
  475. if (call->state < RXRPC_CALL_COMPLETE &&
  476. call->rx_data_post == call->rx_first_oos) {
  477. _debug("drain rx oos now");
  478. set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
  479. }
  480. read_unlock(&call->state_lock);
  481. spin_unlock_bh(&call->lock);
  482. _leave(" [stored #%u]", call->rx_first_oos);
  483. }
  484. /*
  485. * clear the Tx window on final ACK reception
  486. */
  487. static void rxrpc_zap_tx_window(struct rxrpc_call *call)
  488. {
  489. struct rxrpc_skb_priv *sp;
  490. struct sk_buff *skb;
  491. unsigned long _skb, *acks_window;
  492. u8 winsz = call->acks_winsz;
  493. int tail;
  494. acks_window = call->acks_window;
  495. call->acks_window = NULL;
  496. while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
  497. tail = call->acks_tail;
  498. smp_read_barrier_depends();
  499. _skb = acks_window[tail] & ~1;
  500. smp_mb();
  501. call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
  502. skb = (struct sk_buff *) _skb;
  503. sp = rxrpc_skb(skb);
  504. _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
  505. rxrpc_free_skb(skb);
  506. }
  507. kfree(acks_window);
  508. }
  509. /*
  510. * process the extra information that may be appended to an ACK packet
  511. */
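/* By the time this is called the struct rxrpc_ackpacket has already been
 * pulled off the skb (see rxrpc_process_rx_queue()), so the ackinfo trailer
 * is located by skipping the nAcks soft-ACK bytes plus 3 bytes of padding,
 * mirroring the iov layout used when ACKs are transmitted.
 */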
  512. static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
  513. unsigned int latest, int nAcks)
  514. {
  515. struct rxrpc_ackinfo ackinfo;
  516. struct rxrpc_peer *peer;
  517. unsigned int mtu;
  518. if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
  519. _leave(" [no ackinfo]");
  520. return;
  521. }
  522. _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
  523. latest,
  524. ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
  525. ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));
  526. mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));
  527. peer = call->conn->trans->peer;
  528. if (mtu < peer->maxdata) {
  529. spin_lock_bh(&peer->lock);
  530. peer->maxdata = mtu;
  531. peer->mtu = mtu + peer->hdrsize;
  532. spin_unlock_bh(&peer->lock);
  533. _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
  534. }
  535. }
  536. /*
  537. * process packets in the reception queue
  538. */
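/* This loops via the process_further label, pulling deferred packets off
 * call->rx_queue until the queue runs dry (-EAGAIN) or a protocol error is
 * encountered (-EPROTO), at which point the caller aborts the call.
 */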
  539. static int rxrpc_process_rx_queue(struct rxrpc_call *call,
  540. u32 *_abort_code)
  541. {
  542. struct rxrpc_ackpacket ack;
  543. struct rxrpc_skb_priv *sp;
  544. struct sk_buff *skb;
  545. bool post_ACK;
  546. int latest;
  547. u32 hard, tx;
  548. _enter("");
  549. process_further:
  550. skb = skb_dequeue(&call->rx_queue);
  551. if (!skb)
  552. return -EAGAIN;
  553. _net("deferred skb %p", skb);
  554. sp = rxrpc_skb(skb);
  555. _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
  556. post_ACK = false;
  557. switch (sp->hdr.type) {
  558. /* data packets that wind up here have been received out of
  559. * order, need security processing or are jumbo packets */
  560. case RXRPC_PACKET_TYPE_DATA:
  561. _proto("OOSQ DATA %%%u { #%u }",
  562. ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
  563. /* secured packets must be verified and possibly decrypted */
  564. if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
  565. goto protocol_error;
  566. rxrpc_insert_oos_packet(call, skb);
  567. goto process_further;
  568. /* partial ACK to process */
  569. case RXRPC_PACKET_TYPE_ACK:
  570. if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
  571. _debug("extraction failure");
  572. goto protocol_error;
  573. }
  574. if (!skb_pull(skb, sizeof(ack)))
  575. BUG();
  576. latest = ntohl(sp->hdr.serial);
  577. hard = ntohl(ack.firstPacket);
  578. tx = atomic_read(&call->sequence);
  579. _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
  580. latest,
  581. ntohs(ack.maxSkew),
  582. hard,
  583. ntohl(ack.previousPacket),
  584. ntohl(ack.serial),
  585. rxrpc_acks(ack.reason),
  586. ack.nAcks);
  587. rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);
  588. if (ack.reason == RXRPC_ACK_PING) {
  589. _proto("Rx ACK %%%u PING Request", latest);
  590. rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
  591. sp->hdr.serial, true);
  592. }
  593. /* discard any out-of-order or duplicate ACKs */
  594. if (latest - call->acks_latest <= 0) {
  595. _debug("discard ACK %d <= %d",
  596. latest, call->acks_latest);
  597. goto discard;
  598. }
  599. call->acks_latest = latest;
  600. if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
  601. call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
  602. call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
  603. call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
  604. goto discard;
  605. _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
  606. if (hard > 0) {
  607. if (hard - 1 > tx) {
  608. _debug("hard-ACK'd packet %d not transmitted"
  609. " (%d top)",
  610. hard - 1, tx);
  611. goto protocol_error;
  612. }
  613. if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
  614. call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
  615. hard > tx)
  616. goto all_acked;
  617. smp_rmb();
  618. rxrpc_rotate_tx_window(call, hard - 1);
  619. }
  620. if (ack.nAcks > 0) {
  621. if (hard - 1 + ack.nAcks > tx) {
  622. _debug("soft-ACK'd packet %d+%d not"
  623. " transmitted (%d top)",
  624. hard - 1, ack.nAcks, tx);
  625. goto protocol_error;
  626. }
  627. if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
  628. goto protocol_error;
  629. }
  630. goto discard;
  631. /* complete ACK to process */
  632. case RXRPC_PACKET_TYPE_ACKALL:
  633. goto all_acked;
  634. /* abort and busy are handled elsewhere */
  635. case RXRPC_PACKET_TYPE_BUSY:
  636. case RXRPC_PACKET_TYPE_ABORT:
  637. BUG();
  638. /* connection level events - also handled elsewhere */
  639. case RXRPC_PACKET_TYPE_CHALLENGE:
  640. case RXRPC_PACKET_TYPE_RESPONSE:
  641. case RXRPC_PACKET_TYPE_DEBUG:
  642. BUG();
  643. }
  644. /* if we've had a hard ACK that covers all the packets we've sent, then
  645. * that ends that phase of the operation */
  646. all_acked:
  647. write_lock_bh(&call->state_lock);
  648. _debug("ack all %d", call->state);
  649. switch (call->state) {
  650. case RXRPC_CALL_CLIENT_AWAIT_REPLY:
  651. call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
  652. break;
  653. case RXRPC_CALL_SERVER_AWAIT_ACK:
  654. _debug("srv complete");
  655. call->state = RXRPC_CALL_COMPLETE;
  656. post_ACK = true;
  657. break;
  658. case RXRPC_CALL_CLIENT_SEND_REQUEST:
  659. case RXRPC_CALL_SERVER_RECV_REQUEST:
  660. goto protocol_error_unlock; /* can't occur yet */
  661. default:
  662. write_unlock_bh(&call->state_lock);
  663. goto discard; /* assume packet left over from earlier phase */
  664. }
  665. write_unlock_bh(&call->state_lock);
  666. /* if all the packets we sent are hard-ACK'd, then we can discard
  667. * whatever we've got left */
  668. _debug("clear Tx %d",
  669. CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
  670. del_timer_sync(&call->resend_timer);
  671. clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
  672. clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
  673. if (call->acks_window)
  674. rxrpc_zap_tx_window(call);
  675. if (post_ACK) {
  676. /* post the final ACK message for userspace to pick up */
  677. _debug("post ACK");
  678. skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
  679. sp->call = call;
  680. rxrpc_get_call(call);
  681. spin_lock_bh(&call->lock);
  682. if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
  683. BUG();
  684. spin_unlock_bh(&call->lock);
  685. goto process_further;
  686. }
  687. discard:
  688. rxrpc_free_skb(skb);
  689. goto process_further;
  690. protocol_error_unlock:
  691. write_unlock_bh(&call->state_lock);
  692. protocol_error:
  693. rxrpc_free_skb(skb);
  694. _leave(" = -EPROTO");
  695. return -EPROTO;
  696. }
  697. /*
  698. * post a message to the socket Rx queue for recvmsg() to pick up
  699. */
  700. static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
  701. bool fatal)
  702. {
  703. struct rxrpc_skb_priv *sp;
  704. struct sk_buff *skb;
  705. int ret;
  706. _enter("{%d,%lx},%u,%u,%d",
  707. call->debug_id, call->flags, mark, error, fatal);
  708. /* remove timers and things for fatal messages */
  709. if (fatal) {
  710. del_timer_sync(&call->resend_timer);
  711. del_timer_sync(&call->ack_timer);
  712. clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
  713. }
  714. if (mark != RXRPC_SKB_MARK_NEW_CALL &&
  715. !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
  716. _leave("[no userid]");
  717. return 0;
  718. }
  719. if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
  720. skb = alloc_skb(0, GFP_NOFS);
  721. if (!skb)
  722. return -ENOMEM;
  723. rxrpc_new_skb(skb);
  724. skb->mark = mark;
  725. sp = rxrpc_skb(skb);
  726. memset(sp, 0, sizeof(*sp));
  727. sp->error = error;
  728. sp->call = call;
  729. rxrpc_get_call(call);
  730. spin_lock_bh(&call->lock);
  731. ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
  732. spin_unlock_bh(&call->lock);
  733. BUG_ON(ret < 0);
  734. }
  735. return 0;
  736. }
  737. /*
  738. * handle background processing of incoming call packets and ACK / abort
  739. * generation
  740. */
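/* The work-item handler below is event driven: it pre-builds an rxrpc header
 * and kvec array for a possible outgoing packet, then walks the call->events
 * bits roughly in order of severity (release, network/connection errors,
 * abort, busy, final ACK, timeouts, deferred Rx packets, resends, ordinary
 * ACKs, security and accept handling), jumping to send_ACK or send_message
 * when something needs to go on the wire and requeueing itself if further
 * events remain pending.
 */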
  741. void rxrpc_process_call(struct work_struct *work)
  742. {
  743. struct rxrpc_call *call =
  744. container_of(work, struct rxrpc_call, processor);
  745. struct rxrpc_ackpacket ack;
  746. struct rxrpc_ackinfo ackinfo;
  747. struct rxrpc_header hdr;
  748. struct msghdr msg;
  749. struct kvec iov[5];
  750. unsigned long bits;
  751. __be32 data, pad;
  752. size_t len;
  753. int genbit, loop, nbit, ioc, ret, mtu;
  754. u32 abort_code = RX_PROTOCOL_ERROR;
  755. u8 *acks = NULL;
  756. //printk("\n--------------------\n");
  757. _enter("{%d,%s,%lx} [%lu]",
  758. call->debug_id, rxrpc_call_states[call->state], call->events,
  759. (jiffies - call->creation_jif) / (HZ / 10));
  760. if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
  761. _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
  762. return;
  763. }
  764. /* there's a good chance we're going to have to send a message, so set
  765. * one up in advance */
  766. msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
  767. msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
  768. msg.msg_control = NULL;
  769. msg.msg_controllen = 0;
  770. msg.msg_flags = 0;
  771. hdr.epoch = call->conn->epoch;
  772. hdr.cid = call->cid;
  773. hdr.callNumber = call->call_id;
  774. hdr.seq = 0;
  775. hdr.type = RXRPC_PACKET_TYPE_ACK;
  776. hdr.flags = call->conn->out_clientflag;
  777. hdr.userStatus = 0;
  778. hdr.securityIndex = call->conn->security_ix;
  779. hdr._rsvd = 0;
  780. hdr.serviceId = call->conn->service_id;
  781. memset(iov, 0, sizeof(iov));
  782. iov[0].iov_base = &hdr;
  783. iov[0].iov_len = sizeof(hdr);
  784. /* deal with events of a final nature */
  785. if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
  786. rxrpc_release_call(call);
  787. clear_bit(RXRPC_CALL_RELEASE, &call->events);
  788. }
  789. if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
  790. int error;
  791. clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
  792. clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
  793. clear_bit(RXRPC_CALL_ABORT, &call->events);
  794. error = call->conn->trans->peer->net_error;
  795. _debug("post net error %d", error);
  796. if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
  797. error, true) < 0)
  798. goto no_mem;
  799. clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
  800. goto kill_ACKs;
  801. }
  802. if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
  803. ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
  804. clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
  805. clear_bit(RXRPC_CALL_ABORT, &call->events);
  806. _debug("post conn abort");
  807. if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
  808. call->conn->error, true) < 0)
  809. goto no_mem;
  810. clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
  811. goto kill_ACKs;
  812. }
  813. if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
  814. hdr.type = RXRPC_PACKET_TYPE_BUSY;
  815. genbit = RXRPC_CALL_REJECT_BUSY;
  816. goto send_message;
  817. }
  818. if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
  819. ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
  820. if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
  821. ECONNABORTED, true) < 0)
  822. goto no_mem;
  823. hdr.type = RXRPC_PACKET_TYPE_ABORT;
  824. data = htonl(call->abort_code);
  825. iov[1].iov_base = &data;
  826. iov[1].iov_len = sizeof(data);
  827. genbit = RXRPC_CALL_ABORT;
  828. goto send_message;
  829. }
  830. if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
  831. genbit = RXRPC_CALL_ACK_FINAL;
  832. ack.bufferSpace = htons(8);
  833. ack.maxSkew = 0;
  834. ack.serial = 0;
  835. ack.reason = RXRPC_ACK_IDLE;
  836. ack.nAcks = 0;
  837. call->ackr_reason = 0;
  838. spin_lock_bh(&call->lock);
  839. ack.serial = call->ackr_serial;
  840. ack.previousPacket = call->ackr_prev_seq;
  841. ack.firstPacket = htonl(call->rx_data_eaten + 1);
  842. spin_unlock_bh(&call->lock);
  843. pad = 0;
  844. iov[1].iov_base = &ack;
  845. iov[1].iov_len = sizeof(ack);
  846. iov[2].iov_base = &pad;
  847. iov[2].iov_len = 3;
  848. iov[3].iov_base = &ackinfo;
  849. iov[3].iov_len = sizeof(ackinfo);
  850. goto send_ACK;
  851. }
  852. if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
  853. (1 << RXRPC_CALL_RCVD_ABORT))
  854. ) {
  855. u32 mark;
  856. if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
  857. mark = RXRPC_SKB_MARK_REMOTE_ABORT;
  858. else
  859. mark = RXRPC_SKB_MARK_BUSY;
  860. _debug("post abort/busy");
  861. rxrpc_clear_tx_window(call);
  862. if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
  863. goto no_mem;
  864. clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
  865. clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
  866. goto kill_ACKs;
  867. }
  868. if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
  869. _debug("do implicit ackall");
  870. rxrpc_clear_tx_window(call);
  871. }
  872. if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
  873. write_lock_bh(&call->state_lock);
  874. if (call->state <= RXRPC_CALL_COMPLETE) {
  875. call->state = RXRPC_CALL_LOCALLY_ABORTED;
  876. call->abort_code = RX_CALL_TIMEOUT;
  877. set_bit(RXRPC_CALL_ABORT, &call->events);
  878. }
  879. write_unlock_bh(&call->state_lock);
  880. _debug("post timeout");
  881. if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
  882. ETIME, true) < 0)
  883. goto no_mem;
  884. clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
  885. goto kill_ACKs;
  886. }
  887. /* deal with assorted inbound messages */
  888. if (!skb_queue_empty(&call->rx_queue)) {
  889. switch (rxrpc_process_rx_queue(call, &abort_code)) {
  890. case 0:
  891. case -EAGAIN:
  892. break;
  893. case -ENOMEM:
  894. goto no_mem;
  895. case -EKEYEXPIRED:
  896. case -EKEYREJECTED:
  897. case -EPROTO:
  898. rxrpc_abort_call(call, abort_code);
  899. goto kill_ACKs;
  900. }
  901. }
  902. /* handle resending */
  903. if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
  904. rxrpc_resend_timer(call);
  905. if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
  906. rxrpc_resend(call);
  907. /* consider sending an ordinary ACK */
  908. if (test_bit(RXRPC_CALL_ACK, &call->events)) {
  909. _debug("send ACK: window: %d - %d { %lx }",
  910. call->rx_data_eaten, call->ackr_win_top,
  911. call->ackr_window[0]);
  912. if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
  913. call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
  914. /* ACK by sending reply DATA packet in this state */
  915. clear_bit(RXRPC_CALL_ACK, &call->events);
  916. goto maybe_reschedule;
  917. }
  918. genbit = RXRPC_CALL_ACK;
  919. acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
  920. GFP_NOFS);
  921. if (!acks)
  922. goto no_mem;
  923. //hdr.flags = RXRPC_SLOW_START_OK;
  924. ack.bufferSpace = htons(8);
  925. ack.maxSkew = 0;
  926. ack.serial = 0;
  927. ack.reason = 0;
  928. spin_lock_bh(&call->lock);
  929. ack.reason = call->ackr_reason;
  930. ack.serial = call->ackr_serial;
  931. ack.previousPacket = call->ackr_prev_seq;
  932. ack.firstPacket = htonl(call->rx_data_eaten + 1);
  933. ack.nAcks = 0;
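/* Convert the ackr_window bitmap into the wire-format soft-ACK table: each
 * set bit becomes an RXRPC_ACK_TYPE_ACK byte, holes are left as the zeroes
 * kzalloc() provided, and nAcks ends up one past the highest acknowledged
 * slot.
 */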
  934. for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
  935. nbit = loop * BITS_PER_LONG;
  936. for (bits = call->ackr_window[loop]; bits; bits >>= 1
  937. ) {
  938. _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
  939. if (bits & 1) {
  940. acks[nbit] = RXRPC_ACK_TYPE_ACK;
  941. ack.nAcks = nbit + 1;
  942. }
  943. nbit++;
  944. }
  945. }
  946. call->ackr_reason = 0;
  947. spin_unlock_bh(&call->lock);
  948. pad = 0;
  949. iov[1].iov_base = &ack;
  950. iov[1].iov_len = sizeof(ack);
  951. iov[2].iov_base = acks;
  952. iov[2].iov_len = ack.nAcks;
  953. iov[3].iov_base = &pad;
  954. iov[3].iov_len = 3;
  955. iov[4].iov_base = &ackinfo;
  956. iov[4].iov_len = sizeof(ackinfo);
  957. switch (ack.reason) {
  958. case RXRPC_ACK_REQUESTED:
  959. case RXRPC_ACK_DUPLICATE:
  960. case RXRPC_ACK_OUT_OF_SEQUENCE:
  961. case RXRPC_ACK_EXCEEDS_WINDOW:
  962. case RXRPC_ACK_NOSPACE:
  963. case RXRPC_ACK_PING:
  964. case RXRPC_ACK_PING_RESPONSE:
  965. goto send_ACK_with_skew;
  966. case RXRPC_ACK_DELAY:
  967. case RXRPC_ACK_IDLE:
  968. goto send_ACK;
  969. }
  970. }
  971. /* handle completion of security negotiations on an incoming
  972. * connection */
  973. if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
  974. _debug("secured");
  975. spin_lock_bh(&call->lock);
  976. if (call->state == RXRPC_CALL_SERVER_SECURING) {
  977. _debug("securing");
  978. write_lock(&call->conn->lock);
  979. if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
  980. !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
  981. _debug("not released");
  982. call->state = RXRPC_CALL_SERVER_ACCEPTING;
  983. list_move_tail(&call->accept_link,
  984. &call->socket->acceptq);
  985. }
  986. write_unlock(&call->conn->lock);
  987. read_lock(&call->state_lock);
  988. if (call->state < RXRPC_CALL_COMPLETE)
  989. set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
  990. read_unlock(&call->state_lock);
  991. }
  992. spin_unlock_bh(&call->lock);
  993. if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
  994. goto maybe_reschedule;
  995. }
  996. /* post a notification of an acceptable connection to the app */
  997. if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
  998. _debug("post accept");
  999. if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
  1000. 0, false) < 0)
  1001. goto no_mem;
  1002. clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
  1003. goto maybe_reschedule;
  1004. }
  1005. /* handle incoming call acceptance */
  1006. if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
  1007. _debug("accepted");
  1008. ASSERTCMP(call->rx_data_post, ==, 0);
  1009. call->rx_data_post = 1;
  1010. read_lock_bh(&call->state_lock);
  1011. if (call->state < RXRPC_CALL_COMPLETE)
  1012. set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
  1013. read_unlock_bh(&call->state_lock);
  1014. }
  1015. /* drain the out of sequence received packet queue into the packet Rx
  1016. * queue */
  1017. if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
  1018. while (call->rx_data_post == call->rx_first_oos)
  1019. if (rxrpc_drain_rx_oos_queue(call) < 0)
  1020. break;
  1021. goto maybe_reschedule;
  1022. }
  1023. /* other events may have been raised since we started checking */
  1024. goto maybe_reschedule;
  1025. send_ACK_with_skew:
  1026. ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
  1027. ntohl(ack.serial));
  1028. send_ACK:
  1029. mtu = call->conn->trans->peer->if_mtu;
  1030. mtu -= call->conn->trans->peer->hdrsize;
  1031. ackinfo.maxMTU = htonl(mtu);
  1032. ackinfo.rwind = htonl(rxrpc_rx_window_size);
  1033. /* permit the peer to send us jumbo packets if it wants to */
  1034. ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
  1035. ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
  1036. hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
  1037. _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
  1038. ntohl(hdr.serial),
  1039. ntohs(ack.maxSkew),
  1040. ntohl(ack.firstPacket),
  1041. ntohl(ack.previousPacket),
  1042. ntohl(ack.serial),
  1043. rxrpc_acks(ack.reason),
  1044. ack.nAcks);
  1045. del_timer_sync(&call->ack_timer);
  1046. if (ack.nAcks > 0)
  1047. set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
  1048. goto send_message_2;
  1049. send_message:
  1050. _debug("send message");
  1051. hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
  1052. _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
  1053. send_message_2:
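/* Work out how many iov slots were actually filled in (they are populated
 * contiguously from iov[1] upwards) and total up the length to pass to
 * kernel_sendmsg().
 */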
  1054. len = iov[0].iov_len;
  1055. ioc = 1;
  1056. if (iov[4].iov_len) {
  1057. ioc = 5;
  1058. len += iov[4].iov_len;
  1059. len += iov[3].iov_len;
  1060. len += iov[2].iov_len;
  1061. len += iov[1].iov_len;
  1062. } else if (iov[3].iov_len) {
  1063. ioc = 4;
  1064. len += iov[3].iov_len;
  1065. len += iov[2].iov_len;
  1066. len += iov[1].iov_len;
  1067. } else if (iov[2].iov_len) {
  1068. ioc = 3;
  1069. len += iov[2].iov_len;
  1070. len += iov[1].iov_len;
  1071. } else if (iov[1].iov_len) {
  1072. ioc = 2;
  1073. len += iov[1].iov_len;
  1074. }
  1075. ret = kernel_sendmsg(call->conn->trans->local->socket,
  1076. &msg, iov, ioc, len);
  1077. if (ret < 0) {
  1078. _debug("sendmsg failed: %d", ret);
  1079. read_lock_bh(&call->state_lock);
  1080. if (call->state < RXRPC_CALL_DEAD)
  1081. rxrpc_queue_call(call);
  1082. read_unlock_bh(&call->state_lock);
  1083. goto error;
  1084. }
  1085. switch (genbit) {
  1086. case RXRPC_CALL_ABORT:
  1087. clear_bit(genbit, &call->events);
  1088. clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
  1089. goto kill_ACKs;
  1090. case RXRPC_CALL_ACK_FINAL:
  1091. write_lock_bh(&call->state_lock);
  1092. if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
  1093. call->state = RXRPC_CALL_COMPLETE;
  1094. write_unlock_bh(&call->state_lock);
  1095. goto kill_ACKs;
  1096. default:
  1097. clear_bit(genbit, &call->events);
  1098. switch (call->state) {
  1099. case RXRPC_CALL_CLIENT_AWAIT_REPLY:
  1100. case RXRPC_CALL_CLIENT_RECV_REPLY:
  1101. case RXRPC_CALL_SERVER_RECV_REQUEST:
  1102. case RXRPC_CALL_SERVER_ACK_REQUEST:
  1103. _debug("start ACK timer");
  1104. rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
  1105. call->ackr_serial, false);
  1106. default:
  1107. break;
  1108. }
  1109. goto maybe_reschedule;
  1110. }
  1111. kill_ACKs:
  1112. del_timer_sync(&call->ack_timer);
  1113. if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
  1114. rxrpc_put_call(call);
  1115. clear_bit(RXRPC_CALL_ACK, &call->events);
  1116. maybe_reschedule:
  1117. if (call->events || !skb_queue_empty(&call->rx_queue)) {
  1118. read_lock_bh(&call->state_lock);
  1119. if (call->state < RXRPC_CALL_DEAD)
  1120. rxrpc_queue_call(call);
  1121. read_unlock_bh(&call->state_lock);
  1122. }
  1123. /* don't leave aborted connections on the accept queue */
  1124. if (call->state >= RXRPC_CALL_COMPLETE &&
  1125. !list_empty(&call->accept_link)) {
  1126. _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
  1127. call, call->events, call->flags,
  1128. ntohl(call->conn->cid));
  1129. read_lock_bh(&call->state_lock);
  1130. if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
  1131. !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
  1132. rxrpc_queue_call(call);
  1133. read_unlock_bh(&call->state_lock);
  1134. }
  1135. error:
  1136. clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
  1137. kfree(acks);
  1138. /* because we don't want two CPUs both processing the work item for one
  1139. * call at the same time, we use a flag to note when it's busy; however
  1140. * this means there's a race between clearing the flag and setting the
  1141. * work pending bit and the work item being processed again */
  1142. if (call->events && !work_pending(&call->processor)) {
  1143. _debug("jumpstart %x", ntohl(call->conn->cid));
  1144. rxrpc_queue_call(call);
  1145. }
  1146. _leave("");
  1147. return;
  1148. no_mem:
  1149. _debug("out of memory");
  1150. goto maybe_reschedule;
  1151. }