@@ -331,7 +331,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
 	list_add_tail(&req->list, &fiq->pending);
-	wake_up_locked(&fiq->waitq);
+	wake_up(&fiq->waitq);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }

@@ -343,16 +343,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;

-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
 		fiq->forget_list_tail = forget;
-		wake_up_locked(&fiq->waitq);
+		wake_up(&fiq->waitq);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	} else {
 		kfree(forget);
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 }

 static void flush_bg_queue(struct fuse_conn *fc)
@@ -365,10 +365,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		req->in.h.unique = fuse_get_unique(fiq);
 		queue_request(fiq, req);
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 	}
 }

@@ -387,9 +387,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	if (test_and_set_bit(FR_FINISHED, &req->flags))
 		goto put_request;

-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	list_del_init(&req->intr_entry);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -427,16 +427,16 @@ put_request:

 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (test_bit(FR_FINISHED, &req->flags)) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		return;
 	}
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
-		wake_up_locked(&fiq->waitq);
+		wake_up(&fiq->waitq);
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }

@@ -466,16 +466,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		if (!err)
 			return;

-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		/* Request is not yet in userspace, bail out */
 		if (test_bit(FR_PENDING, &req->flags)) {
 			list_del(&req->list);
-			spin_unlock(&fiq->waitq.lock);
+			spin_unlock(&fiq->lock);
 			__fuse_put_request(req);
 			req->out.h.error = -EINTR;
 			return;
 		}
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 	}

 	/*
@@ -490,9 +490,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 	struct fuse_iqueue *fiq = &fc->iq;

 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (!fiq->connected) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		req->out.h.error = -ENOTCONN;
 	} else {
 		req->in.h.unique = fuse_get_unique(fiq);
@@ -500,7 +500,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
 		__fuse_get_request(req);
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);

 		request_wait_answer(fc, req);
 		/* Pairs with smp_wmb() in request_end() */
@@ -633,12 +633,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,

 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (fiq->connected) {
 		queue_request(fiq, req);
 		err = 0;
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);

 	return err;
 }
@@ -1082,12 +1082,12 @@ static int request_pending(struct fuse_iqueue *fiq)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
  */
 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
 			       struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
@@ -1103,7 +1103,7 @@ __releases(fiq->waitq.lock)
 	ih.unique = req->intr_unique;
 	arg.unique = req->in.h.unique;

-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	if (nbytes < reqsize)
 		return -EINVAL;

@@ -1140,7 +1140,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
 				   size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	int err;
 	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
@@ -1154,7 +1154,7 @@ __releases(fiq->waitq.lock)
 		.len = sizeof(ih) + sizeof(arg),
 	};

-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	kfree(forget);
 	if (nbytes < ih.len)
 		return -EINVAL;
@@ -1172,7 +1172,7 @@ __releases(fiq->waitq.lock)

 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
 				  struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	int err;
 	unsigned max_forgets;
@@ -1186,13 +1186,13 @@ __releases(fiq->waitq.lock)
 	};

 	if (nbytes < ih.len) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		return -EINVAL;
 	}

 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
 	head = dequeue_forget(fiq, max_forgets, &count);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);

 	arg.count = count;
 	ih.len += count * sizeof(struct fuse_forget_one);
@@ -1222,7 +1222,7 @@ __releases(fiq->waitq.lock)
 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
 			    struct fuse_copy_state *cs,
 			    size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
 		return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1251,16 +1251,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 	unsigned reqsize;

 restart:
-	spin_lock(&fiq->waitq.lock);
-	err = -EAGAIN;
-	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
-	    !request_pending(fiq))
-		goto err_unlock;
+	for (;;) {
+		spin_lock(&fiq->lock);
+		if (!fiq->connected || request_pending(fiq))
+			break;
+		spin_unlock(&fiq->lock);

-	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		err = wait_event_interruptible_exclusive(fiq->waitq,
 				!fiq->connected || request_pending(fiq));
-	if (err)
-		goto err_unlock;
+		if (err)
+			return err;
+	}

 	if (!fiq->connected) {
 		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
@@ -1284,7 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 	req = list_entry(fiq->pending.next, struct fuse_req, list);
 	clear_bit(FR_PENDING, &req->flags);
 	list_del_init(&req->list);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);

 	in = &req->in;
 	reqsize = in->h.len;
@@ -1341,7 +1344,7 @@ out_end:
 	return err;

 err_unlock:
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	return err;
 }

@@ -2054,12 +2057,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
 	fiq = &fud->fc->iq;
 	poll_wait(file, &fiq->waitq, wait);

-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (!fiq->connected)
 		mask = EPOLLERR;
 	else if (request_pending(fiq))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);

 	return mask;
 }
@@ -2150,15 +2153,15 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);

-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		fiq->connected = 0;
 		list_for_each_entry(req, &fiq->pending, list)
 			clear_bit(FR_PENDING, &req->flags);
 		list_splice_tail_init(&fiq->pending, &to_end);
 		while (forget_pending(fiq))
 			kfree(dequeue_forget(fiq, 1, NULL));
-		wake_up_all_locked(&fiq->waitq);
-		spin_unlock(&fiq->waitq.lock);
+		wake_up_all(&fiq->waitq);
+		spin_unlock(&fiq->lock);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);
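
Note on the locking scheme (editor's sketch, not part of the patch): the hunks above replace every use of fiq->waitq.lock, the spinlock embedded in the wait queue head, with a dedicated fiq->lock. The fuse_i.h hunk that introduces that field is not included in this excerpt; assuming it follows the pattern of the changes shown, it would look roughly like this:

	/*
	 * Hypothetical sketch of the struct fuse_iqueue change (fuse_i.h).
	 * The field name fiq->lock is taken from the hunks above; the
	 * surrounding members and the init site are assumed.
	 */
	struct fuse_iqueue {
		/** Lock protecting accesses to members of this structure */
		spinlock_t lock;

		/** Connection established */
		unsigned connected;

		/** Readers of the connection are waiting on this */
		wait_queue_head_t waitq;

		/* ... remaining members unchanged ... */
	};

	static void fuse_iqueue_init(struct fuse_iqueue *fiq)
	{
		memset(fiq, 0, sizeof(struct fuse_iqueue));
		spin_lock_init(&fiq->lock);	/* assumed init site */
		init_waitqueue_head(&fiq->waitq);
		/* ... */
	}

With the input-queue state no longer protected by waitq.lock itself, the *_locked waitqueue helpers no longer apply: wake_up_locked() and wake_up_all_locked() require the caller to hold waitq.lock, so they become plain wake_up()/wake_up_all(), and fuse_dev_do_read() switches from wait_event_interruptible_exclusive_locked() to wait_event_interruptible_exclusive(), dropping fiq->lock before it can sleep.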