@@ -494,32 +494,21 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Wait for a TCP event.
- *
- * Note that we don't need to lock the socket, as the upper poll layers
- * take care of normal races (between the test and the event) and we don't
- * go look at any of the socket buffers directly.
+ * Socket is not locked. We are protected from async events by poll logic and
+ * correct handling of state changes made by other threads is impossible in
+ * any case.
  */
-__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 {
-	__poll_t mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
+	__poll_t mask = 0;
 	int state;
 
-	sock_poll_wait(file, sk_sleep(sk), wait);
-
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
-	/* Socket is not locked. We are protected from async events
-	 * by poll logic and correct handling of state changes
-	 * made by other threads is impossible in any case.
-	 */
-
-	mask = 0;
-
 	/*
 	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
@@ -600,7 +589,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	return mask;
 }
-EXPORT_SYMBOL(tcp_poll);
+EXPORT_SYMBOL(tcp_poll_mask);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {