@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
 	struct unix_sock *u = unix_sk(sk);
 
-	if (mutex_lock_interruptible(&u->readlock))
+	if (mutex_lock_interruptible(&u->iolock))
 		return -EINTR;
 
 	sk->sk_peek_off = val;
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 
 	return 0;
 }
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	spin_lock_init(&u->lock);
 	atomic_long_set(&u->inflight, 0);
 	INIT_LIST_HEAD(&u->link);
-	mutex_init(&u->readlock); /* single task reading lock */
+	mutex_init(&u->iolock); /* single task reading lock */
+	mutex_init(&u->bindlock); /* single task binding lock */
 	init_waitqueue_head(&u->peer_wait);
 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
 	int err;
 	unsigned int retries = 0;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
 		return err;
 
@@ -895,7 +896,7 @@ retry:
 	spin_unlock(&unix_table_lock);
 	err = 0;
 
-out:	mutex_unlock(&u->readlock);
+out:	mutex_unlock(&u->bindlock);
 	return err;
 }
 
@@ -1009,7 +1010,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
 		goto out;
 
@@ -1063,7 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->bindlock);
 out:
 	return err;
 }
@@ -1955,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	if (false) {
 alloc_skb:
 		unix_state_unlock(other);
-		mutex_unlock(&unix_sk(other)->readlock);
+		mutex_unlock(&unix_sk(other)->iolock);
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
 					      &err, 0);
 		if (!newskb)
 			goto err;
 	}
 
-	/* we must acquire readlock as we modify already present
+	/* we must acquire iolock as we modify already present
 	 * skbs in the sk_receive_queue and mess with skb->len
 	 */
-	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
 	if (err) {
 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
 		goto err;
@@ -2032,7 +2033,7 @@ alloc_skb:
 	}
 
 	unix_state_unlock(other);
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 
 	other->sk_data_ready(other);
 	scm_destroy(&scm);
@@ -2041,7 +2042,7 @@ alloc_skb:
 err_state_unlock:
 	unix_state_unlock(other);
 err_unlock:
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 err:
 	kfree_skb(newskb);
 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2109,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
 	do {
-		mutex_lock(&u->readlock);
+		mutex_lock(&u->iolock);
 
 		skip = sk_peek_offset(sk, flags);
 		skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2117,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		if (skb)
 			break;
 
-		mutex_unlock(&u->readlock);
+		mutex_unlock(&u->iolock);
 
 		if (err != -EAGAIN)
 			break;
 	} while (timeo &&
 		 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
 
-	if (!skb) { /* implies readlock unlocked */
+	if (!skb) { /* implies iolock unlocked */
 		unix_state_lock(sk);
 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2189,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 
 out_free:
 	skb_free_datagram(sk, skb);
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 out:
 	return err;
 }
@@ -2284,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 	/* Lock the socket to prevent queue disordering
 	 * while sleeps in memcpy_tomsg
 	 */
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	if (flags & MSG_PEEK)
 		skip = sk_peek_offset(sk, flags);
@@ -2326,7 +2327,7 @@ again:
 			break;
 		}
 
-		mutex_unlock(&u->readlock);
+		mutex_unlock(&u->iolock);
 
 		timeo = unix_stream_data_wait(sk, timeo, last,
 					      last_len);
@@ -2337,7 +2338,7 @@ again:
 			goto out;
 		}
 
-		mutex_lock(&u->readlock);
+		mutex_lock(&u->iolock);
 		goto redo;
 unlock:
 		unix_state_unlock(sk);
@@ -2440,7 +2441,7 @@ unlock:
 		}
 	} while (size);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	if (state->msg)
 		scm_recv(sock, state->msg, &scm, flags);
 	else
@@ -2481,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
 	int ret;
 	struct unix_sock *u = unix_sk(sk);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	ret = splice_to_pipe(pipe, spd);
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	return ret;
 }
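
Note: the hunks above initialise and take two new mutexes, u->iolock and u->bindlock, in place of the old u->readlock, but the matching field change in struct unix_sock (include/net/af_unix.h) is not part of this excerpt. A minimal sketch of what that companion header change is assumed to look like, not the verbatim diff:

	/* include/net/af_unix.h -- assumed companion change (sketch only):
	 * the former "struct mutex readlock;" member is replaced by the
	 * two mutexes initialised in unix_create1() above.
	 */
	struct unix_sock {
		struct sock	sk;		/* must remain the first member */
		/* ... other members unchanged ... */
		struct mutex	iolock;		/* serialises recv/sendpage work on the queue */
		struct mutex	bindlock;	/* serialises bind()/autobind */
		/* ... */
	};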