call_event.c

/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
			 u32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks(ack_reason), serial, immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		expiry = rxrpc_soft_ack_delay;
		goto run_timer;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = rxrpc_idle_ack_delay;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		expiry = rxrpc_requested_ack_delay;
		if (!expiry)
			goto cancel_timer;
		if (!immediate || serial == 1) {
			_debug("run defer timer");
			goto run_timer;
		}

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", serial);
	try_to_del_timer_sync(&call->ack_timer);
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate)
{
	s8 prior = rxrpc_ack_priority[ack_reason];

	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		spin_lock_bh(&call->lock);
		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
		spin_unlock_bh(&call->lock);
	}
}
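
/*
 * Illustrative sketch (not part of the original file): how a packet-reception
 * path might use rxrpc_propose_ACK().  A packet carrying RXRPC_REQUEST_ACK
 * asks for an immediate REQUESTED ACK; anything else just nudges the deferred
 * IDLE ACK machinery.  The helper name and surrounding context are assumptions
 * made for illustration only.
 */
static inline void example_ack_on_rx(struct rxrpc_call *call,
				     struct rxrpc_skb_priv *sp)
{
	if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		/* the peer asked for an ACK: propose one immediately */
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  sp->hdr.serial, true);
	else
		/* otherwise just (re)arm the delayed/idle ACK timer */
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE,
				  sp->hdr.serial, false);
}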
/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	bool stop;
	int loop;
	u8 resend;

	_enter("{%d,%d,%d,%d},",
	       call->acks_hard, call->acks_unacked,
	       atomic_read(&call->sequence),
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	stop = false;
	resend = 0;
	resend_at = 0;

	for (loop = call->acks_tail;
	     loop != call->acks_head || stop;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		if (*p_txb & 1)
			continue;

		txb = (struct sk_buff *) *p_txb;
		sp = rxrpc_skb(txb);

		if (sp->need_resend) {
			sp->need_resend = false;

			/* each Tx packet has a new serial number */
			sp->hdr.serial = atomic_inc_return(&call->conn->serial);

			whdr = (struct rxrpc_wire_header *)txb->head;
			whdr->serial = htonl(sp->hdr.serial);

			_proto("Tx DATA %%%u { #%d }",
			       sp->hdr.serial, sp->hdr.seq);
			if (rxrpc_send_data_packet(call->conn, txb) < 0) {
				stop = true;
				sp->resend_at = jiffies + 3;
			} else {
				sp->resend_at =
					jiffies + rxrpc_resend_timeout;
			}
		}

		if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}
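
/*
 * Note (added for exposition, not in the original file): the "resend" value
 * handed to rxrpc_set_resend() is a two-bit summary built up by the scan
 * loops in this file.  Bit 0 means at least one packet is already due and the
 * RXRPC_CALL_EV_RESEND event should fire now; bit 1 means a future deadline
 * was seen and the resend timer should be (re)armed at the earliest such
 * "resend_at" jiffies value.
 */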
/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			sp->need_resend = false;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = true;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = true;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
	unsigned long _skb;
	int tail = call->acks_tail, old_tail;
	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

	_enter("{%u,%u},%u", call->acks_hard, win, hard);

	ASSERTCMP(hard - call->acks_hard, <=, win);

	while (call->acks_hard < hard) {
		smp_read_barrier_depends();
		_skb = call->acks_window[tail] & ~1;
		rxrpc_free_skb((struct sk_buff *) _skb);
		old_tail = tail;
		tail = (tail + 1) & (call->acks_winsz - 1);
		call->acks_tail = tail;
		if (call->acks_unacked == old_tail)
			call->acks_unacked = tail;
		call->acks_hard++;
	}

	wake_up(&call->tx_waitq);
}
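
/*
 * Note (added for exposition, not in the original file): acks_window is a
 * power-of-two circular buffer of pointer-sized slots running from acks_tail
 * (oldest packet not yet hard-ACK'd) to acks_head.  Each slot holds the
 * sk_buff pointer of a transmitted DATA packet with the bottom bit borrowed
 * as a "soft-ACK'd" tag - which is why readers mask with "& ~1" before
 * dereferencing, and why rxrpc_process_soft_ACKs() sets or clears that bit.
 */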
/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}

/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       sp->hdr.seq, call->rx_first_oos);

		if (sp->hdr.seq != call->rx_first_oos) {
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;
socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp, *psp;
	struct sk_buff *p;
	u32 seq;

	sp = rxrpc_skb(skb);
	seq = sp->hdr.seq;
	_enter(",,{%u}", seq);

	skb->destructor = rxrpc_packet_destructor;
	ASSERTCMP(sp->call, ==, NULL);
	sp->call = call;
	rxrpc_get_call(call);
	atomic_inc(&call->skb_count);

	/* insert into the buffer in sequence order */
	spin_lock_bh(&call->lock);

	skb_queue_walk(&call->rx_oos_queue, p) {
		psp = rxrpc_skb(p);
		if (psp->hdr.seq > seq) {
			_debug("insert oos #%u before #%u", seq, psp->hdr.seq);
			skb_insert(p, skb, &call->rx_oos_queue);
			goto inserted;
		}
	}

	_debug("append oos #%u", seq);
	skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

	/* we might now have a new front to the queue */
	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
		call->rx_first_oos = seq;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
	}
	read_unlock(&call->state_lock);

	spin_unlock_bh(&call->lock);
	_leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long _skb, *acks_window;
	u8 winsz = call->acks_winsz;
	int tail;

	acks_window = call->acks_window;
	call->acks_window = NULL;

	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
		tail = call->acks_tail;
		smp_read_barrier_depends();
		_skb = acks_window[tail] & ~1;
		smp_mb();
		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

		skb = (struct sk_buff *) _skb;
		sp = rxrpc_skb(skb);
		_debug("+++ clear Tx %u", sp->hdr.seq);
		rxrpc_free_skb(skb);
	}

	kfree(acks_window);
}

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				  unsigned int latest, int nAcks)
{
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_peer *peer;
	unsigned int mtu;

	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
		_leave(" [no ackinfo]");
		return;
	}

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       latest,
	       ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
	       ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

	peer = call->conn->params.peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}
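
/*
 * Worked example (added for exposition, not in the original file): if the
 * peer's ackinfo advertises rxMTU=1444 and maxMTU=1500, the smaller value
 * (1444) is taken as the usable data size; with a hypothetical header size of
 * 28 bytes this would set peer->maxdata = 1444 and peer->mtu = 1472.  The
 * values only ever shrink, because the update is guarded by mtu < maxdata.
 */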
/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
				  u32 *_abort_code)
{
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool post_ACK;
	int latest;
	u32 hard, tx;

	_enter("");

process_further:
	skb = skb_dequeue(&call->rx_queue);
	if (!skb)
		return -EAGAIN;

	_net("deferred skb %p", skb);

	sp = rxrpc_skb(skb);

	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

	post_ACK = false;

	switch (sp->hdr.type) {
		/* data packets that wind up here have been received out of
		 * order, need security processing or are jumbo packets */
	case RXRPC_PACKET_TYPE_DATA:
		_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		/* secured packets must be verified and possibly decrypted */
		if (call->conn->security->verify_packet(call, skb,
							_abort_code) < 0)
			goto protocol_error;

		rxrpc_insert_oos_packet(call, skb);
		goto process_further;

		/* partial ACK to process */
	case RXRPC_PACKET_TYPE_ACK:
		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
			_debug("extraction failure");
			goto protocol_error;
		}
		if (!skb_pull(skb, sizeof(ack)))
			BUG();

		latest = sp->hdr.serial;
		hard = ntohl(ack.firstPacket);
		tx = atomic_read(&call->sequence);

		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       latest,
		       ntohs(ack.maxSkew),
		       hard,
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks(ack.reason),
		       ack.nAcks);

		rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

		if (ack.reason == RXRPC_ACK_PING) {
			_proto("Rx ACK %%%u PING Request", latest);
			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
					  sp->hdr.serial, true);
		}

		/* discard any out-of-order or duplicate ACKs */
		if (latest - call->acks_latest <= 0) {
			_debug("discard ACK %d <= %d",
			       latest, call->acks_latest);
			goto discard;
		}
		call->acks_latest = latest;

		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
			goto discard;

		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

		if (hard > 0) {
			if (hard - 1 > tx) {
				_debug("hard-ACK'd packet %d not transmitted"
				       " (%d top)",
				       hard - 1, tx);
				goto protocol_error;
			}

			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
			    hard > tx) {
				call->acks_hard = tx;
				goto all_acked;
			}

			smp_rmb();
			rxrpc_rotate_tx_window(call, hard - 1);
		}

		if (ack.nAcks > 0) {
			if (hard - 1 + ack.nAcks > tx) {
				_debug("soft-ACK'd packet %d+%d not"
				       " transmitted (%d top)",
				       hard - 1, ack.nAcks, tx);
				goto protocol_error;
			}

			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
				goto protocol_error;
		}
		goto discard;

		/* complete ACK to process */
	case RXRPC_PACKET_TYPE_ACKALL:
		goto all_acked;

		/* abort and busy are handled elsewhere */
	case RXRPC_PACKET_TYPE_BUSY:
	case RXRPC_PACKET_TYPE_ABORT:
		BUG();

		/* connection level events - also handled elsewhere */
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
	case RXRPC_PACKET_TYPE_DEBUG:
		BUG();
	}

	/* if we've had a hard ACK that covers all the packets we've sent, then
	 * that ends that phase of the operation */
all_acked:
	write_lock_bh(&call->state_lock);
	_debug("ack all %d", call->state);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		break;
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		_debug("srv complete");
		call->state = RXRPC_CALL_COMPLETE;
		post_ACK = true;
		break;
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		goto protocol_error_unlock; /* can't occur yet */
	default:
		write_unlock_bh(&call->state_lock);
		goto discard; /* assume packet left over from earlier phase */
	}

	write_unlock_bh(&call->state_lock);

	/* if all the packets we sent are hard-ACK'd, then we can discard
	 * whatever we've got left */
	_debug("clear Tx %d",
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	del_timer_sync(&call->resend_timer);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);

	if (call->acks_window)
		rxrpc_zap_tx_window(call);

	if (post_ACK) {
		/* post the final ACK message for userspace to pick up */
		_debug("post ACK");
		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
		sp->call = call;
		rxrpc_get_call(call);
		atomic_inc(&call->skb_count);
		spin_lock_bh(&call->lock);
		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
			BUG();
		spin_unlock_bh(&call->lock);
		goto process_further;
	}

discard:
	rxrpc_free_skb(skb);
	goto process_further;

protocol_error_unlock:
	write_unlock_bh(&call->state_lock);
protocol_error:
	rxrpc_free_skb(skb);
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
			      bool fatal)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	int ret;

	_enter("{%d,%lx},%u,%u,%d",
	       call->debug_id, call->flags, mark, error, fatal);

	/* remove timers and things for fatal messages */
	if (fatal) {
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}

	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		_leave("[no userid]");
		return 0;
	}

	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		skb = alloc_skb(0, GFP_NOFS);
		if (!skb)
			return -ENOMEM;

		rxrpc_new_skb(skb);

		skb->mark = mark;

		sp = rxrpc_skb(skb);
		memset(sp, 0, sizeof(*sp));
		sp->error = error;
		sp->call = call;
		rxrpc_get_call(call);
		atomic_inc(&call->skb_count);

		spin_lock_bh(&call->lock);
		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
		spin_unlock_bh(&call->lock);
		BUG_ON(ret < 0);
	}

	return 0;
}

/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	struct rxrpc_ackinfo ackinfo;
	struct msghdr msg;
	struct kvec iov[5];
	enum rxrpc_call_event genbit;
	unsigned long bits;
	__be32 data, pad;
	size_t len;
	int loop, nbit, ioc, ret, mtu;
	u32 serial, abort_code = RX_PROTOCOL_ERROR;
	u8 *acks = NULL;

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx} [%lu]",
	       call->debug_id, rxrpc_call_states[call->state], call->events,
	       (jiffies - call->creation_jif) / (HZ / 10));

	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
		_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
		return;
	}

	if (!call->conn)
		goto skip_msg_init;

	/* there's a good chance we're going to have to send a message, so set
	 * one up in advance */
	msg.msg_name = &call->conn->params.peer->srx.transport;
	msg.msg_namelen = call->conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(call->conn->proto.epoch);
	whdr.cid = htonl(call->cid);
	whdr.callNumber = htonl(call->call_id);
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ACK;
	whdr.flags = call->conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = call->conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(call->service_id);

	memset(iov, 0, sizeof(iov));
	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);

skip_msg_init:
	/* deal with events of a final nature */
	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
		enum rxrpc_skb_mark mark;
		int error;

		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		error = call->error_report;
		if (error < RXRPC_LOCAL_ERROR_OFFSET) {
			mark = RXRPC_SKB_MARK_NET_ERROR;
			_debug("post net error %d", error);
		} else {
			mark = RXRPC_SKB_MARK_LOCAL_ERROR;
			error -= RXRPC_LOCAL_ERROR_OFFSET;
			_debug("post net local error %d", error);
		}

		if (rxrpc_post_message(call, mark, error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		_debug("post conn abort");

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       call->conn->error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		genbit = RXRPC_CALL_EV_REJECT_BUSY;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ECONNABORTED, true) < 0)
			goto no_mem;
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		data = htonl(call->local_abort);
		iov[1].iov_base = &data;
		iov[1].iov_len = sizeof(data);
		genbit = RXRPC_CALL_EV_ABORT;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
		genbit = RXRPC_CALL_EV_ACK_FINAL;

		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;
		ack.serial = 0;
		ack.reason = RXRPC_ACK_IDLE;
		ack.nAcks = 0;
		call->ackr_reason = 0;

		spin_lock_bh(&call->lock);
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = &pad;
		iov[2].iov_len = 3;
		iov[3].iov_base = &ackinfo;
		iov[3].iov_len = sizeof(ackinfo);
		goto send_ACK;
	}

	if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
			    (1 << RXRPC_CALL_EV_RCVD_ABORT))
	    ) {
		u32 mark;

		if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
		else
			mark = RXRPC_SKB_MARK_BUSY;

		_debug("post abort/busy");
		rxrpc_clear_tx_window(call);
		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
		_debug("do implicit ackall");
		rxrpc_clear_tx_window(call);
	}

	if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
		write_lock_bh(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_TIMEOUT;
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		}
		write_unlock_bh(&call->state_lock);

		_debug("post timeout");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ETIME, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		goto kill_ACKs;
	}

	/* deal with assorted inbound messages */
	if (!skb_queue_empty(&call->rx_queue)) {
		switch (rxrpc_process_rx_queue(call, &abort_code)) {
		case 0:
		case -EAGAIN:
			break;
		case -ENOMEM:
			goto no_mem;
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EPROTO:
			rxrpc_abort_call(call, abort_code);
			goto kill_ACKs;
		}
	}

	/* handle resending */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_resend_timer(call);
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_resend(call);

	/* consider sending an ordinary ACK */
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("send ACK: window: %d - %d { %lx }",
		       call->rx_data_eaten, call->ackr_win_top,
		       call->ackr_window[0]);

		if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
			/* ACK by sending reply DATA packet in this state */
			clear_bit(RXRPC_CALL_EV_ACK, &call->events);
			goto maybe_reschedule;
		}

		genbit = RXRPC_CALL_EV_ACK;

		acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
			       GFP_NOFS);
		if (!acks)
			goto no_mem;

		//hdr.flags = RXRPC_SLOW_START_OK;
		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;

		spin_lock_bh(&call->lock);
		ack.reason = call->ackr_reason;
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);

		ack.nAcks = 0;
		for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
			nbit = loop * BITS_PER_LONG;
			for (bits = call->ackr_window[loop]; bits; bits >>= 1
			     ) {
				_debug("- l=%d n=%d b=%lx", loop, nbit, bits);
				if (bits & 1) {
					acks[nbit] = RXRPC_ACK_TYPE_ACK;
					ack.nAcks = nbit + 1;
				}
				nbit++;
			}
		}
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = acks;
		iov[2].iov_len = ack.nAcks;
		iov[3].iov_base = &pad;
		iov[3].iov_len = 3;
		iov[4].iov_base = &ackinfo;
		iov[4].iov_len = sizeof(ackinfo);

		switch (ack.reason) {
		case RXRPC_ACK_REQUESTED:
		case RXRPC_ACK_DUPLICATE:
		case RXRPC_ACK_OUT_OF_SEQUENCE:
		case RXRPC_ACK_EXCEEDS_WINDOW:
		case RXRPC_ACK_NOSPACE:
		case RXRPC_ACK_PING:
		case RXRPC_ACK_PING_RESPONSE:
			goto send_ACK_with_skew;
		case RXRPC_ACK_DELAY:
		case RXRPC_ACK_IDLE:
			goto send_ACK;
		}
	}

	/* handle completion of security negotiations on an incoming
	 * connection */
	if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
		_debug("secured");
		spin_lock_bh(&call->lock);

		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			_debug("securing");
			write_lock(&call->socket->call_lock);
			if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
			    !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
				_debug("not released");
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
				list_move_tail(&call->accept_link,
					       &call->socket->acceptq);
			}
			write_unlock(&call->socket->call_lock);
			read_lock(&call->state_lock);
			if (call->state < RXRPC_CALL_COMPLETE)
				set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
			read_unlock(&call->state_lock);
		}

		spin_unlock_bh(&call->lock);
		if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
			goto maybe_reschedule;
	}

	/* post a notification of an acceptable connection to the app */
	if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
		_debug("post accept");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
				       0, false) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
		goto maybe_reschedule;
	}

	/* handle incoming call acceptance */
	if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
		_debug("accepted");
		ASSERTCMP(call->rx_data_post, ==, 0);
		call->rx_data_post = 1;
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
		read_unlock_bh(&call->state_lock);
	}

	/* drain the out of sequence received packet queue into the packet Rx
	 * queue */
	if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
		while (call->rx_data_post == call->rx_first_oos)
			if (rxrpc_drain_rx_oos_queue(call) < 0)
				break;
		goto maybe_reschedule;
	}

	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
		rxrpc_release_call(call);
		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
	}

	/* other events may have been raised since we started checking */
	goto maybe_reschedule;

send_ACK_with_skew:
	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
			    ntohl(ack.serial));
send_ACK:
	mtu = call->conn->params.peer->if_mtu;
	mtu -= call->conn->params.peer->hdrsize;
	ackinfo.maxMTU = htonl(mtu);
	ackinfo.rwind = htonl(rxrpc_rx_window_size);

	/* permit the peer to send us jumbo packets if it wants to */
	ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       serial,
	       ntohs(ack.maxSkew),
	       ntohl(ack.firstPacket),
	       ntohl(ack.previousPacket),
	       ntohl(ack.serial),
	       rxrpc_acks(ack.reason),
	       ack.nAcks);

	del_timer_sync(&call->ack_timer);
	if (ack.nAcks > 0)
		set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
	goto send_message_2;

send_message:
	_debug("send message");

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
send_message_2:

	len = iov[0].iov_len;
	ioc = 1;
	if (iov[4].iov_len) {
		ioc = 5;
		len += iov[4].iov_len;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[3].iov_len) {
		ioc = 4;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[2].iov_len) {
		ioc = 3;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[1].iov_len) {
		ioc = 2;
		len += iov[1].iov_len;
	}

	ret = kernel_sendmsg(call->conn->params.local->socket,
			     &msg, iov, ioc, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
		goto error;
	}

	switch (genbit) {
	case RXRPC_CALL_EV_ABORT:
		clear_bit(genbit, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;

	case RXRPC_CALL_EV_ACK_FINAL:
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
			call->state = RXRPC_CALL_COMPLETE;
		write_unlock_bh(&call->state_lock);
		goto kill_ACKs;

	default:
		clear_bit(genbit, &call->events);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		case RXRPC_CALL_CLIENT_RECV_REPLY:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			_debug("start ACK timer");
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
					  call->ackr_serial, false);
		default:
			break;
		}
		goto maybe_reschedule;
	}

kill_ACKs:
	del_timer_sync(&call->ack_timer);
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
		rxrpc_put_call(call);
	clear_bit(RXRPC_CALL_EV_ACK, &call->events);

maybe_reschedule:
	if (call->events || !skb_queue_empty(&call->rx_queue)) {
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

	/* don't leave aborted connections on the accept queue */
	if (call->state >= RXRPC_CALL_COMPLETE &&
	    !list_empty(&call->accept_link)) {
		_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
		       call, call->events, call->flags, call->conn->proto.cid);

		read_lock_bh(&call->state_lock);
		if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
		    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

error:
	clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
	kfree(acks);

	/* because we don't want two CPUs both processing the work item for one
	 * call at the same time, we use a flag to note when it's busy; however
	 * this means there's a race between clearing the flag and setting the
	 * work pending bit and the work item being processed again */
	if (call->events && !work_pending(&call->processor)) {
		_debug("jumpstart %x", call->conn->proto.cid);
		rxrpc_queue_call(call);
	}

	_leave("");
	return;

no_mem:
	_debug("out of memory");
	goto maybe_reschedule;
}