@@ -3321,6 +3321,36 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
 	return flag;
 }
 
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+			  int mib_idx, u32 *last_oow_ack_time)
+{
+	/* Data packets without SYNs are not likely part of an ACK loop. */
+	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+	    !tcp_hdr(skb)->syn)
+		goto not_rate_limited;
+
+	if (*last_oow_ack_time) {
+		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+			NET_INC_STATS_BH(net, mib_idx);
+			return true;	/* rate-limited: don't send yet! */
+		}
+	}
+
+	*last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+	return false;	/* not rate-limited: go ahead, send dupack now! */
+}
+
 /* RFC 5961 7 [ACK Throttling] */
 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
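
The hunk above measures elapsed time with a signed subtraction on u32
timestamps, (s32)(tcp_time_stamp - *last_oow_ack_time), so the window
check stays correct when the timestamp counter wraps. A minimal,
self-contained userspace sketch of the same idiom (all names here are
hypothetical, not from this patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "are we still inside the rate-limit window?" check,
 * the same idiom tcp_oow_rate_limited() applies to tcp_time_stamp.
 */
static bool within_window(uint32_t now, uint32_t last, int32_t window)
{
	int32_t elapsed = (int32_t)(now - last);

	return 0 <= elapsed && elapsed < window;
}

int main(void)
{
	/* "now" has wrapped past the 32-bit boundary since "last". */
	uint32_t last = 0xfffffff0u, now = 0x00000010u;

	/* Only 0x20 ticks actually elapsed, so this prints 1 (limited). */
	printf("%d\n", within_window(now, last, 500));
	return 0;
}
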
@@ -5912,6 +5942,31 @@ static void tcp_ecn_create_request(struct request_sock *req,
 	inet_rsk(req)->ecn_ok = 1;
 }
 
+static void tcp_openreq_init(struct request_sock *req,
+			     const struct tcp_options_received *rx_opt,
+			     struct sk_buff *skb, const struct sock *sk)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
+	req->cookie_ts = 0;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
+	tcp_rsk(req)->last_oow_ack_time = 0;
+	req->mss = rx_opt->mss_clamp;
+	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->ir_rmt_port = tcp_hdr(skb)->source;
+	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+	ireq->ir_mark = inet_request_mark(sk, skb);
+}
+
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		     const struct tcp_request_sock_ops *af_ops,
 		     struct sock *sk, struct sk_buff *skb)
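
For orientation, here is a hedged sketch of how a caller would consult
tcp_oow_rate_limited() before emitting a duplicate ACK. The
LINUX_MIB_TCPACKSKIPPEDDUPACK counter and the per-socket
tp->last_oow_ack_time field are assumptions about companion patches,
not something this patch introduces:

/* Hypothetical caller: stay silent while the peer is hammering us with
 * out-of-window segments, otherwise answer with the dupack as usual.
 */
static void tcp_maybe_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_oow_rate_limited(sock_net(sk), skb,
				 LINUX_MIB_TCPACKSKIPPEDDUPACK,	/* assumed MIB */
				 &tp->last_oow_ack_time))	/* assumed field */
		return;		/* rate-limited: don't send yet */

	tcp_send_ack(sk);	/* not rate-limited: send the dupack now */
}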