@@ -80,9 +80,7 @@ int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
-int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
 int sysctl_tcp_max_reordering __read_mostly = 300;
-EXPORT_SYMBOL(sysctl_tcp_reordering);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -1883,6 +1881,7 @@ void tcp_enter_loss(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct net *net = sock_net(sk);
 	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
 	bool is_reneg; /* is receiver reneging on SACKs? */
@@ -1933,9 +1932,9 @@ void tcp_enter_loss(struct sock *sk)
 	 * suggests that the degree of reordering is over-estimated.
 	 */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= sysctl_tcp_reordering)
+	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       sysctl_tcp_reordering);
+				       net->ipv4.sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -2119,6 +2118,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
+	int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
@@ -2133,7 +2133,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 	 */
 	packets_out = tp->packets_out;
 	if (packets_out <= tp->reordering &&
-	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
+	    tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
 	    !tcp_may_send_now(sk)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
@@ -3317,7 +3317,7 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
 	return flag & FLAG_DATA_ACKED;
@@ -6163,9 +6163,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	const char *msg = "Dropping request";
 	bool want_cookie = false;
+	struct net *net = sock_net(sk);
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies) {
+	if (net->ipv4.sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6174,7 +6175,7 @@ static bool tcp_syn_flood_action(const struct sock *sk,
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
 	if (!queue->synflood_warned &&
-	    sysctl_tcp_syncookies != 2 &&
+	    net->ipv4.sysctl_tcp_syncookies != 2 &&
 	    xchg(&queue->synflood_warned, 1) == 0)
 		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
 			proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -6207,6 +6208,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
 	struct tcp_options_received tmp_opt;
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct net *net = sock_net(sk);
 	struct sock *fastopen_sk = NULL;
 	struct dst_entry *dst = NULL;
 	struct request_sock *req;
@@ -6217,7 +6219,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	 * limitations, they conserve resources and peer is
 	 * evidently real one.
 	 */
-	if ((sysctl_tcp_syncookies == 2 ||
+	if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
 	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
 		want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
 		if (!want_cookie)
@@ -6283,7 +6285,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		}
 	}
 	/* Kill the following clause, if you dislike this way. */
-	else if (!sysctl_tcp_syncookies &&
+	else if (!net->ipv4.sysctl_tcp_syncookies &&
 		 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
 		  (sysctl_max_syn_backlog >> 2)) &&
 		 !tcp_peer_is_proven(req, dst, false,