call_event.c

/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
			 u32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks(ack_reason), serial, immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		expiry = rxrpc_soft_ack_delay;
		goto run_timer;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = rxrpc_idle_ack_delay;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		expiry = rxrpc_requested_ack_delay;
		if (!expiry)
			goto cancel_timer;
		if (!immediate || serial == 1) {
			_debug("run defer timer");
			goto run_timer;
		}

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", serial);
	try_to_del_timer_sync(&call->ack_timer);
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate)
{
	s8 prior = rxrpc_ack_priority[ack_reason];

	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		spin_lock_bh(&call->lock);
		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
		spin_unlock_bh(&call->lock);
	}
}
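
/*
 * Note on the "resend" value used below: it is a pair of flags rather than a
 * boolean.  Bit 0 means at least one packet is due for retransmission right
 * away (raise RXRPC_CALL_EV_RESEND); bit 1 means resend_at holds the earliest
 * future retransmission time (arm the resend timer).
 */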
/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}
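
/*
 * Each occupied slot of call->acks_window holds the pointer to a transmitted
 * sk_buff; bit 0 of the slot is borrowed as a marker and is set once that
 * packet has been soft-ACK'd by the peer (and cleared again if it is NACK'd).
 */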
/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	bool stop;
	int loop;
	u8 resend;

	_enter("{%d,%d,%d,%d},",
	       call->acks_hard, call->acks_unacked,
	       atomic_read(&call->sequence),
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	stop = false;
	resend = 0;
	resend_at = 0;

	for (loop = call->acks_tail;
	     loop != call->acks_head || stop;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		if (*p_txb & 1)
			continue;

		txb = (struct sk_buff *) *p_txb;
		sp = rxrpc_skb(txb);

		if (sp->need_resend) {
			sp->need_resend = false;

			/* each Tx packet has a new serial number */
			sp->hdr.serial = atomic_inc_return(&call->conn->serial);

			whdr = (struct rxrpc_wire_header *)txb->head;
			whdr->serial = htonl(sp->hdr.serial);

			_proto("Tx DATA %%%u { #%d }",
			       sp->hdr.serial, sp->hdr.seq);
			if (rxrpc_send_data_packet(call->conn, txb) < 0) {
				stop = true;
				sp->resend_at = jiffies + 3;
			} else {
				sp->resend_at =
					jiffies + rxrpc_resend_timeout;
			}
		}

		if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}
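
/*
 * Note that, unlike rxrpc_resend() above, the timer handler below only scans
 * from call->acks_unacked to the head of the window, i.e. the packets beyond
 * those covered by the last batch of soft-ACKs that was processed.
 */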
/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}
/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			sp->need_resend = false;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = true;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = true;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}
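
/*
 * A hard ACK (ack.firstPacket) by contrast covers every packet up to but not
 * including that sequence number; unlike soft-ACK'd packets, these won't be
 * asked for again, so rotating the Tx window frees them and advances the
 * tail.
 */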
/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
	unsigned long _skb;
	int tail = call->acks_tail, old_tail;
	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

	_enter("{%u,%u},%u", call->acks_hard, win, hard);

	ASSERTCMP(hard - call->acks_hard, <=, win);

	while (call->acks_hard < hard) {
		smp_read_barrier_depends();
		_skb = call->acks_window[tail] & ~1;
		rxrpc_free_skb((struct sk_buff *) _skb);
		old_tail = tail;
		tail = (tail + 1) & (call->acks_winsz - 1);
		call->acks_tail = tail;
		if (call->acks_unacked == old_tail)
			call->acks_unacked = tail;
		call->acks_hard++;
	}

	wake_up(&call->tx_waitq);
}

/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}
/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       sp->hdr.seq, call->rx_first_oos);

		if (sp->hdr.seq != call->rx_first_oos) {
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;

socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}
/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp, *psp;
	struct sk_buff *p;
	u32 seq;

	sp = rxrpc_skb(skb);
	seq = sp->hdr.seq;
	_enter(",,{%u}", seq);

	skb->destructor = rxrpc_packet_destructor;
	ASSERTCMP(sp->call, ==, NULL);
	sp->call = call;
	rxrpc_get_call(call);

	/* insert into the buffer in sequence order */
	spin_lock_bh(&call->lock);

	skb_queue_walk(&call->rx_oos_queue, p) {
		psp = rxrpc_skb(p);
		if (psp->hdr.seq > seq) {
			_debug("insert oos #%u before #%u", seq, psp->hdr.seq);
			skb_insert(p, skb, &call->rx_oos_queue);
			goto inserted;
		}
	}

	_debug("append oos #%u", seq);
	skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

	/* we might now have a new front to the queue */
	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
		call->rx_first_oos = seq;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
	}
	read_unlock(&call->state_lock);

	spin_unlock_bh(&call->lock);
	_leave(" [stored #%u]", call->rx_first_oos);
}
/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long _skb, *acks_window;
	u8 winsz = call->acks_winsz;
	int tail;

	acks_window = call->acks_window;
	call->acks_window = NULL;

	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
		tail = call->acks_tail;
		smp_read_barrier_depends();
		_skb = acks_window[tail] & ~1;
		smp_mb();
		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

		skb = (struct sk_buff *) _skb;
		sp = rxrpc_skb(skb);
		_debug("+++ clear Tx %u", sp->hdr.seq);
		rxrpc_free_skb(skb);
	}

	kfree(acks_window);
}
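
/*
 * Note on the offset below: the soft-ACK array (nAcks bytes) plus three bytes
 * of padding precede the rxrpc_ackinfo trailer, hence the skb_copy_bits()
 * read at nAcks + 3 (the fixed ACK header has already been pulled off by the
 * caller).  The same layout is used on the transmit side in
 * rxrpc_process_call() below.
 */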
/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				  unsigned int latest, int nAcks)
{
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_peer *peer;
	unsigned int mtu;

	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
		_leave(" [no ackinfo]");
		return;
	}

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       latest,
	       ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
	       ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

	peer = call->conn->params.peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}
/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
				  u32 *_abort_code)
{
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool post_ACK;
	int latest;
	u32 hard, tx;

	_enter("");

process_further:
	skb = skb_dequeue(&call->rx_queue);
	if (!skb)
		return -EAGAIN;

	_net("deferred skb %p", skb);

	sp = rxrpc_skb(skb);

	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

	post_ACK = false;

	switch (sp->hdr.type) {
		/* data packets that wind up here have been received out of
		 * order, need security processing or are jumbo packets */
	case RXRPC_PACKET_TYPE_DATA:
		_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		/* secured packets must be verified and possibly decrypted */
		if (call->conn->security->verify_packet(call, skb,
							_abort_code) < 0)
			goto protocol_error;

		rxrpc_insert_oos_packet(call, skb);
		goto process_further;

		/* partial ACK to process */
	case RXRPC_PACKET_TYPE_ACK:
		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
			_debug("extraction failure");
			goto protocol_error;
		}
		if (!skb_pull(skb, sizeof(ack)))
			BUG();

		latest = sp->hdr.serial;
		hard = ntohl(ack.firstPacket);
		tx = atomic_read(&call->sequence);

		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       latest,
		       ntohs(ack.maxSkew),
		       hard,
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks(ack.reason),
		       ack.nAcks);

		rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

		if (ack.reason == RXRPC_ACK_PING) {
			_proto("Rx ACK %%%u PING Request", latest);
			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
					  sp->hdr.serial, true);
		}

		/* discard any out-of-order or duplicate ACKs */
		if (latest - call->acks_latest <= 0) {
			_debug("discard ACK %d <= %d",
			       latest, call->acks_latest);
			goto discard;
		}
		call->acks_latest = latest;

		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
			goto discard;

		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

		if (hard > 0) {
			if (hard - 1 > tx) {
				_debug("hard-ACK'd packet %d not transmitted"
				       " (%d top)",
				       hard - 1, tx);
				goto protocol_error;
			}

			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
			    hard > tx) {
				call->acks_hard = tx;
				goto all_acked;
			}

			smp_rmb();
			rxrpc_rotate_tx_window(call, hard - 1);
		}

		if (ack.nAcks > 0) {
			if (hard - 1 + ack.nAcks > tx) {
				_debug("soft-ACK'd packet %d+%d not"
				       " transmitted (%d top)",
				       hard - 1, ack.nAcks, tx);
				goto protocol_error;
			}

			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
				goto protocol_error;
		}
		goto discard;

		/* complete ACK to process */
	case RXRPC_PACKET_TYPE_ACKALL:
		goto all_acked;

		/* abort and busy are handled elsewhere */
	case RXRPC_PACKET_TYPE_BUSY:
	case RXRPC_PACKET_TYPE_ABORT:
		BUG();

		/* connection level events - also handled elsewhere */
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
	case RXRPC_PACKET_TYPE_DEBUG:
		BUG();
	}

	/* if we've had a hard ACK that covers all the packets we've sent, then
	 * that ends that phase of the operation */
all_acked:
	write_lock_bh(&call->state_lock);
	_debug("ack all %d", call->state);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		break;
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		_debug("srv complete");
		call->state = RXRPC_CALL_COMPLETE;
		post_ACK = true;
		break;
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		goto protocol_error_unlock; /* can't occur yet */
	default:
		write_unlock_bh(&call->state_lock);
		goto discard; /* assume packet left over from earlier phase */
	}

	write_unlock_bh(&call->state_lock);

	/* if all the packets we sent are hard-ACK'd, then we can discard
	 * whatever we've got left */
	_debug("clear Tx %d",
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	del_timer_sync(&call->resend_timer);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);

	if (call->acks_window)
		rxrpc_zap_tx_window(call);

	if (post_ACK) {
		/* post the final ACK message for userspace to pick up */
		_debug("post ACK");
		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
		sp->call = call;
		rxrpc_get_call(call);
		spin_lock_bh(&call->lock);
		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
			BUG();
		spin_unlock_bh(&call->lock);
		goto process_further;
	}

discard:
	rxrpc_free_skb(skb);
	goto process_further;

protocol_error_unlock:
	write_unlock_bh(&call->state_lock);
protocol_error:
	rxrpc_free_skb(skb);
	_leave(" = -EPROTO");
	return -EPROTO;
}
/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
			      bool fatal)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	int ret;

	_enter("{%d,%lx},%u,%u,%d",
	       call->debug_id, call->flags, mark, error, fatal);

	/* remove timers and things for fatal messages */
	if (fatal) {
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}

	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		_leave("[no userid]");
		return 0;
	}

	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		skb = alloc_skb(0, GFP_NOFS);
		if (!skb)
			return -ENOMEM;

		rxrpc_new_skb(skb);

		skb->mark = mark;

		sp = rxrpc_skb(skb);
		memset(sp, 0, sizeof(*sp));
		sp->error = error;
		sp->call = call;
		rxrpc_get_call(call);

		spin_lock_bh(&call->lock);
		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
		spin_unlock_bh(&call->lock);
		BUG_ON(ret < 0);
	}

	return 0;
}
/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	struct rxrpc_ackinfo ackinfo;
	struct msghdr msg;
	struct kvec iov[5];
	enum rxrpc_call_event genbit;
	unsigned long bits;
	__be32 data, pad;
	size_t len;
	int loop, nbit, ioc, ret, mtu;
	u32 serial, abort_code = RX_PROTOCOL_ERROR;
	u8 *acks = NULL;

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx} [%lu]",
	       call->debug_id, rxrpc_call_states[call->state], call->events,
	       (jiffies - call->creation_jif) / (HZ / 10));

	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
		_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
		return;
	}

	/* there's a good chance we're going to have to send a message, so set
	 * one up in advance */
	msg.msg_name = &call->conn->params.peer->srx.transport;
	msg.msg_namelen = call->conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(call->conn->proto.epoch);
	whdr.cid = htonl(call->cid);
	whdr.callNumber = htonl(call->call_id);
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ACK;
	whdr.flags = call->conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = call->conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(call->service_id);

	memset(iov, 0, sizeof(iov));
	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);

	/* deal with events of a final nature */
	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
		enum rxrpc_skb_mark mark;
		int error;

		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		error = call->error_report;
		if (error < RXRPC_LOCAL_ERROR_OFFSET) {
			mark = RXRPC_SKB_MARK_NET_ERROR;
			_debug("post net error %d", error);
		} else {
			mark = RXRPC_SKB_MARK_LOCAL_ERROR;
			error -= RXRPC_LOCAL_ERROR_OFFSET;
			_debug("post net local error %d", error);
		}

		if (rxrpc_post_message(call, mark, error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		_debug("post conn abort");

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       call->conn->error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		genbit = RXRPC_CALL_EV_REJECT_BUSY;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ECONNABORTED, true) < 0)
			goto no_mem;
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		data = htonl(call->local_abort);
		iov[1].iov_base = &data;
		iov[1].iov_len = sizeof(data);
		genbit = RXRPC_CALL_EV_ABORT;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
		genbit = RXRPC_CALL_EV_ACK_FINAL;

		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;
		ack.serial = 0;
		ack.reason = RXRPC_ACK_IDLE;
		ack.nAcks = 0;
		call->ackr_reason = 0;

		spin_lock_bh(&call->lock);
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = &pad;
		iov[2].iov_len = 3;
		iov[3].iov_base = &ackinfo;
		iov[3].iov_len = sizeof(ackinfo);
		goto send_ACK;
	}

	if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
			    (1 << RXRPC_CALL_EV_RCVD_ABORT))
	    ) {
		u32 mark;

		if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
		else
			mark = RXRPC_SKB_MARK_BUSY;

		_debug("post abort/busy");
		rxrpc_clear_tx_window(call);
		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
		_debug("do implicit ackall");
		rxrpc_clear_tx_window(call);
	}

	if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
		write_lock_bh(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_TIMEOUT;
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		}
		write_unlock_bh(&call->state_lock);

		_debug("post timeout");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ETIME, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		goto kill_ACKs;
	}

	/* deal with assorted inbound messages */
	if (!skb_queue_empty(&call->rx_queue)) {
		switch (rxrpc_process_rx_queue(call, &abort_code)) {
		case 0:
		case -EAGAIN:
			break;
		case -ENOMEM:
			goto no_mem;
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EPROTO:
			rxrpc_abort_call(call, abort_code);
			goto kill_ACKs;
		}
	}

	/* handle resending */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_resend_timer(call);
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_resend(call);

	/* consider sending an ordinary ACK */
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("send ACK: window: %d - %d { %lx }",
		       call->rx_data_eaten, call->ackr_win_top,
		       call->ackr_window[0]);

		if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
			/* ACK by sending reply DATA packet in this state */
			clear_bit(RXRPC_CALL_EV_ACK, &call->events);
			goto maybe_reschedule;
		}

		genbit = RXRPC_CALL_EV_ACK;

		acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
			       GFP_NOFS);
		if (!acks)
			goto no_mem;

		//hdr.flags = RXRPC_SLOW_START_OK;
		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;

		spin_lock_bh(&call->lock);
		ack.reason = call->ackr_reason;
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);

		ack.nAcks = 0;
		for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
			nbit = loop * BITS_PER_LONG;
			for (bits = call->ackr_window[loop]; bits; bits >>= 1
			     ) {
				_debug("- l=%d n=%d b=%lx", loop, nbit, bits);
				if (bits & 1) {
					acks[nbit] = RXRPC_ACK_TYPE_ACK;
					ack.nAcks = nbit + 1;
				}
				nbit++;
			}
		}
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = acks;
		iov[2].iov_len = ack.nAcks;
		iov[3].iov_base = &pad;
		iov[3].iov_len = 3;
		iov[4].iov_base = &ackinfo;
		iov[4].iov_len = sizeof(ackinfo);

		switch (ack.reason) {
		case RXRPC_ACK_REQUESTED:
		case RXRPC_ACK_DUPLICATE:
		case RXRPC_ACK_OUT_OF_SEQUENCE:
		case RXRPC_ACK_EXCEEDS_WINDOW:
		case RXRPC_ACK_NOSPACE:
		case RXRPC_ACK_PING:
		case RXRPC_ACK_PING_RESPONSE:
			goto send_ACK_with_skew;
		case RXRPC_ACK_DELAY:
		case RXRPC_ACK_IDLE:
			goto send_ACK;
		}
	}

	/* handle completion of security negotiations on an incoming
	 * connection */
	if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
		_debug("secured");
		spin_lock_bh(&call->lock);

		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			_debug("securing");
			write_lock(&call->conn->lock);
			if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
			    !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
				_debug("not released");
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
				list_move_tail(&call->accept_link,
					       &call->socket->acceptq);
			}
			write_unlock(&call->conn->lock);
			read_lock(&call->state_lock);
			if (call->state < RXRPC_CALL_COMPLETE)
				set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
			read_unlock(&call->state_lock);
		}

		spin_unlock_bh(&call->lock);
		if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
			goto maybe_reschedule;
	}

	/* post a notification of an acceptable connection to the app */
	if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
		_debug("post accept");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
				       0, false) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
		goto maybe_reschedule;
	}

	/* handle incoming call acceptance */
	if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
		_debug("accepted");
		ASSERTCMP(call->rx_data_post, ==, 0);
		call->rx_data_post = 1;
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
		read_unlock_bh(&call->state_lock);
	}

	/* drain the out of sequence received packet queue into the packet Rx
	 * queue */
	if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
		while (call->rx_data_post == call->rx_first_oos)
			if (rxrpc_drain_rx_oos_queue(call) < 0)
				break;
		goto maybe_reschedule;
	}

	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
		rxrpc_release_call(call);
		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
	}

	/* other events may have been raised since we started checking */
	goto maybe_reschedule;
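
	/* Transmission: the labels below share a common tail.
	 * send_ACK_with_skew fills in the skew before falling into send_ACK,
	 * which completes the ACK trailer; send_message handles non-ACK
	 * packets.  All of them end up in send_message_2, which works out how
	 * many iov[] slots were filled in above and hands the lot to
	 * kernel_sendmsg().
	 */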
send_ACK_with_skew:
	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
			    ntohl(ack.serial));
send_ACK:
	mtu = call->conn->params.peer->if_mtu;
	mtu -= call->conn->params.peer->hdrsize;
	ackinfo.maxMTU = htonl(mtu);
	ackinfo.rwind = htonl(rxrpc_rx_window_size);

	/* permit the peer to send us jumbo packets if it wants to */
	ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       serial,
	       ntohs(ack.maxSkew),
	       ntohl(ack.firstPacket),
	       ntohl(ack.previousPacket),
	       ntohl(ack.serial),
	       rxrpc_acks(ack.reason),
	       ack.nAcks);

	del_timer_sync(&call->ack_timer);
	if (ack.nAcks > 0)
		set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
	goto send_message_2;

send_message:
	_debug("send message");

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
send_message_2:

	len = iov[0].iov_len;
	ioc = 1;
	if (iov[4].iov_len) {
		ioc = 5;
		len += iov[4].iov_len;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[3].iov_len) {
		ioc = 4;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[2].iov_len) {
		ioc = 3;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[1].iov_len) {
		ioc = 2;
		len += iov[1].iov_len;
	}

	ret = kernel_sendmsg(call->conn->params.local->socket,
			     &msg, iov, ioc, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
		goto error;
	}

	switch (genbit) {
	case RXRPC_CALL_EV_ABORT:
		clear_bit(genbit, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;

	case RXRPC_CALL_EV_ACK_FINAL:
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
			call->state = RXRPC_CALL_COMPLETE;
		write_unlock_bh(&call->state_lock);
		goto kill_ACKs;

	default:
		clear_bit(genbit, &call->events);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		case RXRPC_CALL_CLIENT_RECV_REPLY:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			_debug("start ACK timer");
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
					  call->ackr_serial, false);
		default:
			break;
		}
		goto maybe_reschedule;
	}

kill_ACKs:
	del_timer_sync(&call->ack_timer);
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
		rxrpc_put_call(call);
	clear_bit(RXRPC_CALL_EV_ACK, &call->events);

maybe_reschedule:
	if (call->events || !skb_queue_empty(&call->rx_queue)) {
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

	/* don't leave aborted connections on the accept queue */
	if (call->state >= RXRPC_CALL_COMPLETE &&
	    !list_empty(&call->accept_link)) {
		_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
		       call, call->events, call->flags, call->conn->proto.cid);

		read_lock_bh(&call->state_lock);
		if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
		    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

error:
	clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
	kfree(acks);

	/* because we don't want two CPUs both processing the work item for one
	 * call at the same time, we use a flag to note when it's busy; however
	 * this means there's a race between clearing the flag and setting the
	 * work pending bit and the work item being processed again */
	if (call->events && !work_pending(&call->processor)) {
		_debug("jumpstart %x", call->conn->proto.cid);
		rxrpc_queue_call(call);
	}

	_leave("");
	return;

no_mem:
	_debug("out of memory");
	goto maybe_reschedule;
}