@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
-	int err = 0;
+	int err = 0, err2;
 	int len;
 
 	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 		goto err_out;
 	}
 err_out:
-	err = sg_finish_rem_req(srp);
-	return (0 == err) ? count : err;
+	err2 = sg_finish_rem_req(srp);
+	return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
 	}
 	/* Rely on write phase to clean out srp status values, so no "else" */
 
+	/*
+	 * Free the request as soon as it is complete so that its resources
+	 * can be reused without waiting for userspace to read() the
+	 * result. But keep the associated bio (if any) around until
+	 * blk_rq_unmap_user() can be called from user context.
+	 */
+	srp->rq = NULL;
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	__blk_put_request(rq->q, rq);
+
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 			return -ENOMEM;
 	}
 
-	rq = blk_get_request(q, rw, GFP_ATOMIC);
+	/*
+	 * NOTE
+	 *
+	 * With scsi-mq enabled, there are a fixed number of preallocated
+	 * requests equal in number to shost->can_queue. If all of the
+	 * preallocated requests are already in use, then using GFP_ATOMIC with
+	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+	 * will cause blk_get_request() to sleep until an active command
+	 * completes, freeing up a request. Neither option is ideal, but
+	 * GFP_KERNEL is the better choice to prevent userspace from getting an
+	 * unexpected EWOULDBLOCK.
+	 *
+	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+	 * does not sleep except under memory pressure.
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		kfree(long_cmdp);
 		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
 				      "sg_finish_rem_req: res_used=%d\n",
 				      (int) srp->res_used));
-	if (srp->rq) {
-		if (srp->bio)
-			ret = blk_rq_unmap_user(srp->bio);
-
+	if (srp->bio)
+		ret = blk_rq_unmap_user(srp->bio);
+
+	if (srp->rq) {
 		if (srp->rq->cmd != srp->rq->__cmd)
 			kfree(srp->rq->cmd);
 		blk_put_request(srp->rq);
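
A note for readers on the `return err ? : err2 ? : count;` line in sg_new_read(): it uses GCC's conditional-with-omitted-middle-operand extension, where `a ? : b` evaluates to `a` when `a` is nonzero and to `b` otherwise, with `a` evaluated only once. The effect is that a read-phase error (e.g. -EFAULT from copy_to_user()) is reported in preference to any sg_finish_rem_req() cleanup error, and `count` is returned only when both are zero. A minimal standalone sketch of these semantics (the function name and values are hypothetical, not part of the patch):

#include <stdio.h>

/* Mirrors the patched return statement: the first error wins, then the
 * cleanup error, then the byte count on full success. */
static int pick_retval(int err, int err2, int count)
{
	return err ? : err2 ? : count;	/* GNU C: a ? : b == a ? a : b */
}

int main(void)
{
	printf("%d\n", pick_retval(-14, -5, 64));	/* -14 (read-phase error)  */
	printf("%d\n", pick_retval(0, -5, 64));		/* -5  (cleanup error)     */
	printf("%d\n", pick_retval(0, 0, 64));		/* 64  (success -> count)  */
	return 0;
}

Unlike the old `return (0 == err) ? count : err;`, which let the sg_finish_rem_req() result overwrite whatever error the read phase had already recorded, this ordering preserves the original failure.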