@@ -24,28 +24,40 @@
 /*
  * Set the timer
  */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+		     ktime_t now)
 {
-	unsigned long t, now = jiffies;
+	unsigned long t_j, now_j = jiffies;
+	ktime_t t;
 
 	read_lock_bh(&call->state_lock);
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		t = call->expire_at;
-		if (time_before_eq(t, now))
+		if (!ktime_after(t, now))
 			goto out;
 
-		if (time_after(call->resend_at, now) &&
-		    time_before(call->resend_at, t))
+		if (ktime_after(call->resend_at, now) &&
+		    ktime_before(call->resend_at, t))
 			t = call->resend_at;
 
-		if (time_after(call->ack_at, now) &&
-		    time_before(call->ack_at, t))
+		if (ktime_after(call->ack_at, now) &&
+		    ktime_before(call->ack_at, t))
 			t = call->ack_at;
 
-		if (call->timer.expires != t || !timer_pending(&call->timer)) {
-			mod_timer(&call->timer, t);
-			trace_rxrpc_timer(call, why, now);
+		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
+		t_j += jiffies;
+
+		/* We have to make sure that the calculated jiffies value falls
+		 * at or after the nsec value, or we may loop ceaselessly
+		 * because the timer times out, but we haven't reached the nsec
+		 * timeout yet.
+		 */
+		t_j++;
+
+		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
+			mod_timer(&call->timer, t_j);
+			trace_rxrpc_timer(call, why, now, now_j);
 		}
 	}
 
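The t_j++ in the hunk above is the subtle part: nsecs_to_jiffies()
truncates, so the computed jiffies expiry can land a fraction of a tick
before the ktime_t deadline.  The timer would then fire early, the expiry
checks would find nothing due yet, and the worker would rearm the timer in
a tight loop.  A minimal userspace sketch of the hazard (the HZ value and
the truncating helper are reimplemented here purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ 250				/* assumed tick rate */
	#define TICK_NSEC (1000000000ULL / HZ)	/* 4ms per tick at HZ=250 */

	/* Truncating conversion, like the kernel's nsecs_to_jiffies(). */
	static uint64_t nsecs_to_jiffies(uint64_t ns)
	{
		return ns / TICK_NSEC;
	}

	int main(void)
	{
		uint64_t delta_ns = 10800000;	/* deadline 10.8ms away */
		uint64_t t_j = nsecs_to_jiffies(delta_ns);

		/* Truncated: 2 ticks = 8ms, before the 10.8ms deadline. */
		printf("truncated:  %llu ns\n",
		       (unsigned long long)(t_j * TICK_NSEC));

		t_j++;	/* the patch's correction */

		/* Rounded up: 3 ticks = 12ms, at/after the deadline. */
		printf("rounded up: %llu ns\n",
		       (unsigned long long)(t_j * TICK_NSEC));
		return 0;
	}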
@@ -62,7 +74,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 			      enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-	unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
+	unsigned int expiry = rxrpc_soft_ack_delay;
+	ktime_t now, ack_at;
 	s8 prior = rxrpc_ack_priority[ack_reason];
 
 	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
@@ -111,7 +124,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		break;
 	}
 
-	now = jiffies;
 	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
 		_debug("already scheduled");
 	} else if (immediate || expiry == 0) {
@@ -120,11 +132,11 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		    background)
 			rxrpc_queue_call(call);
 	} else {
-		ack_at = now + expiry;
-		_debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
-		if (time_before(ack_at, call->ack_at)) {
+		now = ktime_get_real();
+		ack_at = ktime_add_ms(now, expiry);
+		if (ktime_before(ack_at, call->ack_at)) {
 			call->ack_at = ack_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
+			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
 		}
 	}
 }
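Two things change in the deferred-ACK path above.  First, the units:
expiry used to be a jiffies delta added directly to now; it is now applied
with ktime_add_ms(), i.e. interpreted as milliseconds and turned into
plain nanosecond arithmetic on the signed 64-bit ktime_t.  A sketch of the
helper's semantics, modelled loosely on include/linux/ktime.h (the real
one goes through ktime_add_ns()):

	#include <stdint.h>

	typedef int64_t ktime_t;	/* nanoseconds, signed 64-bit */

	#define NSEC_PER_MSEC 1000000LL

	static ktime_t ktime_add_ms(ktime_t kt, uint64_t msec)
	{
		return kt + (ktime_t)(msec * NSEC_PER_MSEC);
	}

Second, the ktime_before() test keeps the old behaviour: a proposed ACK
can only pull call->ack_at earlier, never push an already-pending ACK
later.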
@@ -157,12 +169,12 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call)
+static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
 {
 	struct rxrpc_skb_priv *sp;
 	struct sk_buff *skb;
 	rxrpc_seq_t cursor, seq, top;
-	ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
+	ktime_t max_age, oldest, ack_ts;
 	int ix;
 	u8 annotation, anno_type, retrans = 0, unacked = 0;
 
@@ -212,14 +224,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 			       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
 	}
 
-	resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
-	call->resend_at = jiffies +
-		nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
-		1; /* We have to make sure that the calculated jiffies value
-		    * falls at or after the nsec value, or we shall loop
-		    * ceaselessly because the timer times out, but we haven't
-		    * reached the nsec timeout yet.
-		    */
+	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
 
 	if (unacked)
 		rxrpc_congestion_timeout(call);
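With call->resend_at kept as a ktime_t, the open-coded jiffies conversion
and its carefully commented "+ 1" disappear from rxrpc_resend(); the one
remaining copy of that rounding logic lives in rxrpc_set_timer() in the
first hunk, so every deadline now gets the same round-up treatment.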
@@ -229,7 +234,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 	 * retransmitting data.
 	 */
 	if (!retrans) {
-		rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
 		spin_unlock_bh(&call->lock);
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_ns(ack_ts) < call->peer->rtt)
@@ -301,7 +306,7 @@ void rxrpc_process_call(struct work_struct *work)
 {
 	struct rxrpc_call *call =
 		container_of(work, struct rxrpc_call, processor);
-	unsigned long now;
+	ktime_t now;
 
 	rxrpc_see_call(call);
 
@@ -320,15 +325,15 @@ recheck_state:
 		goto out_put;
 	}
 
-	now = jiffies;
-	if (time_after_eq(now, call->expire_at)) {
+	now = ktime_get_real();
+	if (ktime_before(call->expire_at, now)) {
 		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		goto recheck_state;
 	}
 
 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
-	    time_after_eq(now, call->ack_at)) {
+	    ktime_before(call->ack_at, now)) {
 		call->ack_at = call->expire_at;
 		if (call->ackr_reason) {
 			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
@@ -337,12 +342,12 @@ recheck_state:
 	}
 
 	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
-	    time_after_eq(now, call->resend_at)) {
-		rxrpc_resend(call);
+	    ktime_before(call->resend_at, now)) {
+		rxrpc_resend(call, now);
 		goto recheck_state;
 	}
 
-	rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
 
 	/* other events may have been raised since we started checking */
 	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
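One semantic note on the expiry tests in the last two hunks: the jiffies
checks time_after_eq(now, X) became ktime_before(X, now).  Jiffies
comparisons have to be wraparound-safe, which is why the old macros do a
signed subtraction; ktime_t is a signed 64-bit nanosecond count that will
not wrap for centuries, so a plain relational comparison suffices.  The
boundary also shifts by a nanosecond: at exact equality time_after_eq()
fired where ktime_before() does not.  A small sketch of both behaviours
(helpers reimplemented for illustration, after include/linux/jiffies.h
and include/linux/ktime.h):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	typedef int64_t ktime_t;

	static bool ktime_before(ktime_t a, ktime_t b)
	{
		return a < b;
	}

	/* Wraparound-safe jiffies comparison. */
	static bool time_after_eq(unsigned long a, unsigned long b)
	{
		return (long)(a - b) >= 0;
	}

	int main(void)
	{
		/* "now" just after the jiffies counter wraps is still
		 * correctly seen as after a deadline set just before it. */
		assert(time_after_eq(2UL, (unsigned long)-3L));

		/* At exact equality the old test fired... */
		assert(time_after_eq(100, 100));
		/* ...but the ktime test does not (a nanosecond later it
		 * will, which is inconsequential at this resolution). */
		assert(!ktime_before(100, 100));
		return 0;
	}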