Browse Source

net/smc: handle sockopt TCP_DEFER_ACCEPT

If the sockopt TCP_DEFER_ACCEPT is set, the accept is delayed until
data is available.

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ursula Braun 7 years ago
parent
commit
abb190f194
4 changed files with 31 additions and 2 deletions
  1. 25 1
      net/smc/af_smc.c
  2. 4 0
      net/smc/smc.h
  3. 1 1
      net/smc/smc_rx.c
  4. 1 0
      net/smc/smc_rx.h

+ 25 - 1
net/smc/af_smc.c

@@ -1044,6 +1044,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
 		rc = -EINVAL;
 		rc = -EINVAL;
+		release_sock(sk);
 		goto out;
 		goto out;
 	}
 	}
 
 
@@ -1071,9 +1072,29 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 
 	if (!rc)
 	if (!rc)
 		rc = sock_error(nsk);
 		rc = sock_error(nsk);
+	release_sock(sk);
+	if (rc)
+		goto out;
+
+	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
+		/* wait till data arrives on the socket */
+		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
+								MSEC_PER_SEC);
+		if (smc_sk(nsk)->use_fallback) {
+			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
+
+			lock_sock(clcsk);
+			if (skb_queue_empty(&clcsk->sk_receive_queue))
+				sk_wait_data(clcsk, &timeo, NULL);
+			release_sock(clcsk);
+		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
+			lock_sock(nsk);
+			smc_rx_wait_data(smc_sk(nsk), &timeo);
+			release_sock(nsk);
+		}
+	}
 
 
 out:
 out:
-	release_sock(sk);
 	sock_put(sk); /* sock_hold above */
 	sock_put(sk); /* sock_hold above */
 	return rc;
 	return rc;
 }
 }
@@ -1340,6 +1361,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 						 0);
 						 0);
 		}
 		}
 		break;
 		break;
+	case TCP_DEFER_ACCEPT:
+		smc->sockopt_defer_accept = val;
+		break;
 	default:
 	default:
 		break;
 		break;
 	}
 	}

+ 4 - 0
net/smc/smc.h

@@ -180,6 +180,10 @@ struct smc_sock {				/* smc sock container */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
 	bool			use_fallback;	/* fallback to tcp */
 	bool			use_fallback;	/* fallback to tcp */
+	int			sockopt_defer_accept;
+						/* sockopt TCP_DEFER_ACCEPT
+						 * value
+						 */
 	u8			wait_close_tx_prepared : 1;
 	u8			wait_close_tx_prepared : 1;
 						/* shutdown wr or close
 						/* shutdown wr or close
 						 * started, waiting for unsent
 						 * started, waiting for unsent

+ 1 - 1
net/smc/smc_rx.c

@@ -51,7 +51,7 @@ static void smc_rx_data_ready(struct sock *sk)
  * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
  * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
  * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
  * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
  */
  */
-static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
+int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
 {
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct smc_connection *conn = &smc->conn;
 	struct smc_connection *conn = &smc->conn;

+ 1 - 0
net/smc/smc_rx.h

@@ -20,5 +20,6 @@
 void smc_rx_init(struct smc_sock *smc);
 void smc_rx_init(struct smc_sock *smc);
 int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 		   int flags);
 		   int flags);
+int smc_rx_wait_data(struct smc_sock *smc, long *timeo);
 
 
 #endif /* SMC_RX_H */
 #endif /* SMC_RX_H */