@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
  */
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device = cqr->startdev;
-	unsigned long flags;
-	int rc;
+	int rc = 0;
 
-	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	switch (cqr->status) {
 	case DASD_CQR_QUEUED:
 		/* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	default: /* already finished or clear pending - do nothing */
 		break;
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	dasd_schedule_device_bh(device);
 	return rc;
 }
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	rc = __dasd_cancel_req(cqr);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
 
 /*
  * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 	cqr->retries = -1;
 	cqr->intrc = -ETIMEDOUT;
 	if (cqr->status >= DASD_CQR_QUEUED) {
-		spin_unlock(get_ccwdev_lock(device->cdev));
-		rc = dasd_cancel_req(cqr);
+		rc = __dasd_cancel_req(cqr);
 	} else if (cqr->status == DASD_CQR_FILLED ||
 		   cqr->status == DASD_CQR_NEED_ERP) {
 		cqr->status = DASD_CQR_TERMINATED;
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	} else if (cqr->status == DASD_CQR_IN_ERP) {
 		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
 
@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			searchcqr->retries = -1;
 			searchcqr->intrc = -ETIMEDOUT;
 			if (searchcqr->status >= DASD_CQR_QUEUED) {
-				spin_unlock(get_ccwdev_lock(device->cdev));
-				rc = dasd_cancel_req(searchcqr);
-				spin_lock(get_ccwdev_lock(device->cdev));
+				rc = __dasd_cancel_req(searchcqr);
 			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
 				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
 				searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			}
 			break;
 		}
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
 	dasd_schedule_block_bh(block);
 	spin_unlock(&block->queue_lock);
 	spin_unlock_irqrestore(&cqr->dq->lock, flags);