@@ -112,13 +112,6 @@ static void __fuse_put_request(struct fuse_req *req)
 	refcount_dec(&req->count);
 }
 
-static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
-	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
-	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
-}
-
 void fuse_set_initialized(struct fuse_conn *fc)
 {
 	/* Make sure stores before this are seen on another CPU */
@@ -163,11 +156,19 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 		goto out;
 	}
 
-	fuse_req_init_context(fc, req);
+	req->in.h.uid = from_kuid(&init_user_ns, current_fsuid());
+	req->in.h.gid = from_kgid(&init_user_ns, current_fsgid());
+	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+
 	__set_bit(FR_WAITING, &req->flags);
 	if (for_background)
 		__set_bit(FR_BACKGROUND, &req->flags);
 
+	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
+		     req->in.h.gid == ((gid_t)-1))) {
+		fuse_put_request(fc, req);
+		return ERR_PTR(-EOVERFLOW);
+	}
 	return req;
 
 out:
@@ -256,7 +257,10 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	if (!req)
 		req = get_reserved_req(fc, file);
 
-	fuse_req_init_context(fc, req);
+	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
+	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
+	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+
 	__set_bit(FR_WAITING, &req->flags);
 	__clear_bit(FR_BACKGROUND, &req->flags);
 	return req;