  1. /* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/circ_buf.h>
  14. #include <linux/net.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/slab.h>
  17. #include <linux/udp.h>
  18. #include <net/sock.h>
  19. #include <net/af_rxrpc.h>
  20. #include "ar-internal.h"
/*
 * Set the timer
 *
 * Re-arm call->timer to fire at the earliest of the call's hard expiry,
 * resend and ACK deadlines that lie in the future relative to @now.  @why is
 * only used for tracing.  Takes state_lock to stabilise call->state; does
 * nothing once the call is complete.
 */
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
		     ktime_t now)
{
	unsigned long t_j, now_j = jiffies;
	ktime_t t;

	read_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		t = call->expire_at;
		/* Hard expiry already passed: leave the timer as-is */
		if (!ktime_after(t, now))
			goto out;

		/* Pull the deadline earlier for a pending resend or ACK that
		 * is due before the hard expiry.
		 */
		if (ktime_after(call->resend_at, now) &&
		    ktime_before(call->resend_at, t))
			t = call->resend_at;

		if (ktime_after(call->ack_at, now) &&
		    ktime_before(call->ack_at, t))
			t = call->ack_at;

		/* Convert the ktime delta into an absolute jiffies expiry */
		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
		t_j += jiffies;

		/* We have to make sure that the calculated jiffies value falls
		 * at or after the nsec value, or we may loop ceaselessly
		 * because the timer times out, but we haven't reached the nsec
		 * timeout yet.
		 */
		t_j++;

		/* Only touch the timer if the expiry actually changed or the
		 * timer isn't queued, to avoid needless mod_timer() churn.
		 */
		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
			mod_timer(&call->timer, t_j);
			trace_rxrpc_timer(call, why, now, now_j);
		}
	}

out:
	read_unlock_bh(&call->state_lock);
}
/*
 * propose an ACK be sent
 *
 * Record that an ACK of type @ack_reason should go out in response to packet
 * @serial (with clock skew @skew).  A higher-priority reason displaces a
 * pending lower-priority one; an equal reason just refreshes the serial/skew.
 * If the ACK ends up immediate and @background is set, the call is queued for
 * the work item to transmit it; otherwise the ack_at timer may be pulled
 * earlier.  Caller must hold call->lock (see rxrpc_propose_ACK()).
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u16 skew, u32 serial, bool immediate,
				bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned int expiry = rxrpc_soft_ack_delay;
	ktime_t now, ack_at;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
			call->ackr_skew = skew;
		}
		/* Same reason and not urgent: nothing to reschedule */
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		/* Higher-priority reason displaces the pending one */
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
		call->ackr_skew = skew;
	} else {
		/* Lower priority: the pending ACK subsumes this proposal.
		 * Note that we still fall through to the scheduling logic
		 * below using the new reason's expiry.
		 */
		outcome = rxrpc_propose_ack_subsume;
	}

	/* Pick the delay appropriate to the proposed reason; anything not
	 * listed is sent immediately.
	 */
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		/* The first packet's requested-ACK isn't forced immediate */
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_PING:
	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		/* An ACK transmission is already queued on the work item */
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		/* Deferred ACK: pull the ack_at timer earlier if needed */
		now = ktime_get_real();
		ack_at = ktime_add_ms(now, expiry);
		if (ktime_before(ack_at, call->ack_at)) {
			call->ack_at = ack_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}
  128. /*
  129. * propose an ACK be sent, locking the call structure
  130. */
  131. void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
  132. u16 skew, u32 serial, bool immediate, bool background,
  133. enum rxrpc_propose_ack_trace why)
  134. {
  135. spin_lock_bh(&call->lock);
  136. __rxrpc_propose_ACK(call, ack_reason, skew, serial,
  137. immediate, background, why);
  138. spin_unlock_bh(&call->lock);
  139. }
/*
 * Handle congestion being detected by the retransmit timeout.
 *
 * Merely flags the event; the RETRANS_TIMEOUT bit is consumed by the
 * congestion-management code elsewhere.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
/*
 * Perform retransmission of NAK'd and unack'd packets.
 *
 * Scans the Tx window [tx_hard_ack+1, tx_top] once under call->lock to mark
 * packets for retransmission and compute the next resend deadline, then
 * walks the window again, dropping the lock around each actual send.  If no
 * packet needed retransmitting, a ping ACK is proposed instead on the theory
 * that an ACK was lost.
 */
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t cursor, seq, top;
	ktime_t max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	/* Packets transmitted before this instant count as timed out */
	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;	/* Tx window is empty */

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;	/* soft-ACK'd: no resend needed */

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
		sp = rxrpc_skb(skb);	/* NOTE(review): sp unused here - confirm */

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				/* Not yet timed out: remember the oldest such
				 * packet so the resend timer covers it.
				 */
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			/* Count first-time timeouts for congestion handling */
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
		spin_unlock_bh(&call->lock);
		/* Don't ping if we heard an ACK within the last RTT */
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
			/* Lock is already dropped on this early-exit path */
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

		/* Skip over packets that were hard-ACK'd whilst unlocked */
		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}
  260. /*
  261. * Handle retransmission and deferred ACK/abort generation.
  262. */
  263. void rxrpc_process_call(struct work_struct *work)
  264. {
  265. struct rxrpc_call *call =
  266. container_of(work, struct rxrpc_call, processor);
  267. ktime_t now;
  268. rxrpc_see_call(call);
  269. //printk("\n--------------------\n");
  270. _enter("{%d,%s,%lx}",
  271. call->debug_id, rxrpc_call_states[call->state], call->events);
  272. recheck_state:
  273. if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
  274. rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
  275. goto recheck_state;
  276. }
  277. if (call->state == RXRPC_CALL_COMPLETE) {
  278. del_timer_sync(&call->timer);
  279. goto out_put;
  280. }
  281. now = ktime_get_real();
  282. if (ktime_before(call->expire_at, now)) {
  283. rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
  284. set_bit(RXRPC_CALL_EV_ABORT, &call->events);
  285. goto recheck_state;
  286. }
  287. if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
  288. ktime_before(call->ack_at, now)) {
  289. call->ack_at = call->expire_at;
  290. if (call->ackr_reason) {
  291. rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
  292. goto recheck_state;
  293. }
  294. }
  295. if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
  296. ktime_before(call->resend_at, now)) {
  297. rxrpc_resend(call, now);
  298. goto recheck_state;
  299. }
  300. rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
  301. /* other events may have been raised since we started checking */
  302. if (call->events && call->state < RXRPC_CALL_COMPLETE) {
  303. __rxrpc_queue_call(call);
  304. goto out;
  305. }
  306. out_put:
  307. rxrpc_put_call(call, rxrpc_call_put);
  308. out:
  309. _leave("");
  310. }