@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
 	unsigned long flags;
 
 	rcu_read_lock();
-	atomic_dec(&shost->host_busy);
+	if (!shost->use_blk_mq)
+		atomic_dec(&shost->host_busy);
 	if (unlikely(scsi_host_in_recovery(shost))) {
 		spin_lock_irqsave(shost->host_lock, flags);
 		if (shost->host_failed || shost->host_eh_scheduled)
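For reference, this is how scsi_dec_host_busy() reads with the hunk applied; the tail of the function (the scsi_eh_wakeup() call and the unlock pair) is filled in from the surrounding scsi_lib.c and is not part of the hunk itself:

static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
	unsigned long flags;

	rcu_read_lock();
	/* blk-mq hosts stop maintaining the atomic counter entirely */
	if (!shost->use_blk_mq)
		atomic_dec(&shost->host_busy);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}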
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-	if (shost->can_queue > 0 &&
+	/*
+	 * blk-mq can handle host queue busy efficiently via host-wide driver
+	 * tag allocation
+	 */
+
+	if (!shost->use_blk_mq && shost->can_queue > 0 &&
 	    atomic_read(&shost->host_busy) >= shost->can_queue)
 		return true;
 	if (atomic_read(&shost->host_blocked) > 0)
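The new comment leans on blk-mq sizing the host-shared tag set at can_queue: once can_queue driver tags are allocated, no further command can even reach the host, so the explicit counter comparison becomes redundant. A minimal sketch of that sizing, loosely modeled on scsi_mq_setup_tags() from kernels of this era (abbreviated, cmd_size setup omitted; the exact fields are assumed from that source, not shown in this patch):

/* Sketch: bounding the scsi-mq tag set by can_queue means tag
 * allocation itself enforces the host-wide limit that the
 * atomic_read(&shost->host_busy) comparison used to enforce. */
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;	/* host-wide bound */
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	return blk_mq_alloc_tag_set(&shost->tag_set);
}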
@@ -1600,9 +1606,12 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 	if (scsi_host_in_recovery(shost))
 		return 0;
 
-	busy = atomic_inc_return(&shost->host_busy) - 1;
+	if (!shost->use_blk_mq)
+		busy = atomic_inc_return(&shost->host_busy) - 1;
+	else
+		busy = 0;
 	if (atomic_read(&shost->host_blocked) > 0) {
-		if (busy)
+		if (busy || scsi_host_busy(shost))
 			goto starved;
 
 		/*
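scsi_host_busy() is the helper introduced earlier in this series; the same change reworks it on the hosts.c side (not included in this excerpt) so that, for blk-mq hosts, it counts started requests by walking the tag set instead of reading the now-idle counter. Roughly:

/* Approximate shape of the hosts.c side of this change; the
 * iterator callback signature follows the blk-mq API of this era. */
static void scsi_host_check_in_flight(struct request *rq, void *data,
				      bool reserved)
{
	int *count = data;

	if (blk_mq_request_started(rq))
		(*count)++;
}

int scsi_host_busy(struct Scsi_Host *shost)
{
	int cnt = 0;

	if (shost->use_blk_mq)
		blk_mq_tagset_busy_iter(&shost->tag_set,
					scsi_host_check_in_flight, &cnt);
	else
		cnt = atomic_read(&shost->host_busy);
	return cnt;
}

This is what keeps the host_blocked check working for scsi-mq: busy is forced to 0 there, so the scsi_host_busy() call is the only way the starvation path can still see in-flight commands.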
@@ -1616,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				     "unblocking host at zero depth\n"));
 	}
 
-	if (shost->can_queue > 0 && busy >= shost->can_queue)
+	if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
 		goto starved;
 	if (shost->host_self_blocked)
 		goto starved;
@@ -1702,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	 * with the locks as normal issue path does.
 	 */
 	atomic_inc(&sdev->device_busy);
-	atomic_inc(&shost->host_busy);
+
+	if (!shost->use_blk_mq)
+		atomic_inc(&shost->host_busy);
 	if (starget->can_queue > 0)
 		atomic_inc(&starget->target_busy);
 
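scsi_kill_request() is only reached from the legacy scsi_request_fn() submission path, so the new !use_blk_mq test should always evaluate true there; the gate mainly keeps the accounting rule uniform across every place that touches host_busy. A rough, abbreviated sketch of the call site in the pre-multiqueue scsi_lib.c (structure recalled from that era, not part of this patch):

static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);	/* legacy request_fn API */
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			/* legacy path only: use_blk_mq is false here, so
			 * scsi_kill_request() still bumps the counters */
			scsi_kill_request(req, q);
			continue;
		}
		/* ... normal issue path elided ... */
	}
}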