@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-		 "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
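The hunk above drops the dctcp_clamp_alpha_on_loss module knob. Clamping dctcp_alpha to DCTCP_MAX_ALPHA on loss only ever reduced the window indirectly, through the alpha scaling in DCTCP's ssthresh computation, and only when the knob was explicitly enabled. A standalone sketch of that scaling follows; the DCTCP_MAX_ALPHA value and the >> 11U shift mirror the constants used in net/ipv4/tcp_dctcp.c as I recall them, and dctcp_scaled_cwnd() is a made-up helper name, not a kernel function:

/* Standalone sketch, not kernel code: how dctcp_alpha scales the
 * congestion window in DCTCP's ssthresh computation. */
#include <stdio.h>

#define DCTCP_MAX_ALPHA 1024U

static unsigned int dctcp_scaled_cwnd(unsigned int cwnd, unsigned int alpha)
{
	/* cwnd - cwnd * alpha / 2048: alpha is in [0, 1024], so the
	 * reduction ranges from nothing (alpha 0) to half (alpha 1024). */
	unsigned int reduced = cwnd - ((cwnd * alpha) >> 11U);

	return reduced > 2U ? reduced : 2U;	/* keep at least two segments */
}

int main(void)
{
	printf("%u\n", dctcp_scaled_cwnd(20U, DCTCP_MAX_ALPHA));	/* 10 */
	printf("%u\n", dctcp_scaled_cwnd(20U, 512U));			/* 15 */
	return 0;
}

With alpha clamped to its maximum the reduction degenerates to a plain halving, which is what the new dctcp_react_to_loss() in the next hunk does unconditionally, without requiring the knob.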
@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 	}
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-	if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-		struct dctcp *ca = inet_csk_ca(sk);
+	struct dctcp *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
-		/* If this extension is enabled, we clamp dctcp_alpha to
-		 * max on packet loss; the motivation is that dctcp_alpha
-		 * is an indicator to the extend of congestion and packet
-		 * loss is an indicator of extreme congestion; setting
-		 * this in practice turned out to be beneficial, and
-		 * effectively assumes total congestion which reduces the
-		 * window by half.
-		 */
-		ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-	}
+	ca->loss_cwnd = tp->snd_cwnd;
+	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+	if (new_state == TCP_CA_Recovery &&
+	    new_state != inet_csk(sk)->icsk_ca_state)
+		dctcp_react_to_loss(sk);
+	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+	 * one loss-adjustment per RTT.
+	 */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
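dctcp_react_to_loss() records the current window in ca->loss_cwnd and halves ssthresh with a floor of two segments, mirroring a Reno-style loss response; saving loss_cwnd is what lets DCTCP's undo path restore the window if the loss later proves spurious. dctcp_state() invokes it only on the transition into TCP_CA_Recovery (the fast-retransmit path), not on every call while already in that state, and RTO is deliberately left to dctcp_cwnd_event() so each loss episode is charged once per RTT. A runnable userspace sketch of the arithmetic; struct fake_sock and max_u32() are illustrative stand-ins, not kernel types:

/* Runnable sketch of the dctcp_react_to_loss() window arithmetic. */
#include <stdio.h>

struct fake_sock {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	unsigned int loss_cwnd;
};

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static void react_to_loss(struct fake_sock *sk)
{
	sk->loss_cwnd = sk->snd_cwnd;	/* remembered for possible undo */
	sk->snd_ssthresh = max_u32(sk->snd_cwnd >> 1U, 2U);	/* halve, floor 2 */
}

int main(void)
{
	struct fake_sock sk = { .snd_cwnd = 20U };

	react_to_loss(&sk);
	printf("ssthresh=%u loss_cwnd=%u\n", sk.snd_ssthresh, sk.loss_cwnd);
	/* prints: ssthresh=10 loss_cwnd=20 */
	return 0;
}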
@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 	case CA_EVENT_ECN_NO_CE:
 		dctcp_ce_state_1_to_0(sk);
 		break;
+	case CA_EVENT_LOSS:
+		dctcp_react_to_loss(sk);
+		break;
 	default:
 		/* Don't care for the rest. */
 		break;
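CA_EVENT_LOSS is raised from tcp_enter_loss() when the retransmission timer fires, so with this case added DCTCP now reacts at both loss-recovery entry points: fast retransmit via dctcp_state() and RTO via this event. A minimal sketch of the transition guard that keeps the reaction to once per episode; the enum values are illustrative, not the kernel's:

/* Minimal sketch of the "react only on the state transition" guard
 * used by dctcp_state(). */
#include <stdio.h>

enum ca_state { CA_OPEN, CA_RECOVERY };

static enum ca_state ca_state = CA_OPEN;
static int reactions;

static void set_state(enum ca_state new_state)
{
	/* fire once on entering recovery, not on every call while in it */
	if (new_state == CA_RECOVERY && new_state != ca_state)
		reactions++;
	ca_state = new_state;
}

int main(void)
{
	set_state(CA_RECOVERY);	/* transition: reacts */
	set_state(CA_RECOVERY);	/* already there: no reaction */
	set_state(CA_OPEN);
	printf("reactions=%d\n", reactions);	/* prints: reactions=1 */
	return 0;
}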