@@ -295,7 +295,8 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 	unsigned long flags;
 
 	atomic_dec(&shost->host_busy);
-	atomic_dec(&starget->target_busy);
+	if (starget->can_queue > 0)
+		atomic_dec(&starget->target_busy);
 	if (unlikely(scsi_host_in_recovery(shost) &&
 		     (shost->host_failed || shost->host_eh_scheduled))) {
 		spin_lock_irqsave(shost->host_lock, flags);
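The pattern in this hunk repeats throughout the patch: starget->target_busy is only maintained when the target actually enforces a queue-depth limit (can_queue > 0), so unlimited targets never pay for an atomic read-modify-write on a counter nobody consults. A minimal userspace sketch of that accounting pattern, using C11 atomics in place of the kernel's atomic_t (the struct and function names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct sketch_target {
	int can_queue;          /* <= 0 means "no per-target limit" */
	atomic_int target_busy; /* only maintained when a limit exists */
};

void sketch_get_budget(struct sketch_target *t)
{
	/* Skip the atomic RMW (and its shared-cache-line traffic)
	 * when nothing ever reads the counter. */
	if (t->can_queue > 0)
		atomic_fetch_add(&t->target_busy, 1);
}

void sketch_put_budget(struct sketch_target *t)
{
	if (t->can_queue > 0)
		atomic_fetch_sub(&t->target_busy, 1);
}

int main(void)
{
	struct sketch_target t = { .can_queue = 2 };

	sketch_get_budget(&t);
	sketch_get_budget(&t);
	sketch_put_budget(&t);
	printf("busy = %d\n", atomic_load(&t.target_busy)); /* prints: busy = 1 */
	return 0;
}

Since the counter sits in the scsi_target shared by every command issued to that target, presumably the point is to avoid cross-CPU contention on it in the common case where no per-target limit is configured.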
@@ -364,11 +365,12 @@ static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 
 static inline bool scsi_target_is_busy(struct scsi_target *starget)
 {
-	if (starget->can_queue > 0 &&
-	    atomic_read(&starget->target_busy) >= starget->can_queue)
-		return true;
-	if (atomic_read(&starget->target_blocked) > 0)
-		return true;
+	if (starget->can_queue > 0) {
+		if (atomic_read(&starget->target_busy) >= starget->can_queue)
+			return true;
+		if (atomic_read(&starget->target_blocked) > 0)
+			return true;
+	}
 	return false;
 }
 
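Note this restructuring changes behavior slightly, not just shape: with can_queue <= 0, neither target_busy nor target_blocked is read, so an unlimited target can never report busy — consistent with those counters no longer being maintained for it. Sketched with the same illustrative userspace types as above:

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_target {
	int can_queue;              /* <= 0: no per-target limit */
	atomic_int target_busy;
	atomic_int target_blocked;
};

/* With no limit configured, neither counter is read here (nor,
 * per the other hunks, written), so an unlimited target is
 * never "busy". */
bool sketch_target_is_busy(struct sketch_target *t)
{
	if (t->can_queue > 0) {
		if (atomic_load(&t->target_busy) >= t->can_queue)
			return true;
		if (atomic_load(&t->target_blocked) > 0)
			return true;
	}
	return false;
}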
@@ -1309,6 +1311,9 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
 		spin_unlock_irq(shost->host_lock);
 	}
 
+	if (starget->can_queue <= 0)
+		return 1;
+
 	busy = atomic_inc_return(&starget->target_busy) - 1;
 	if (atomic_read(&starget->target_blocked) > 0) {
 		if (busy)
@@ -1324,7 +1329,7 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
 				     "unblocking target at zero depth\n"));
 	}
 
-	if (starget->can_queue > 0 && busy >= starget->can_queue)
+	if (busy >= starget->can_queue)
 		goto starved;
 
 	return 1;
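The early "return 1" added above is what lets this hunk drop the "starget->can_queue > 0 &&" half of the check: by the time busy is compared against the limit, can_queue > 0 is already guaranteed. A compressed sketch of the resulting fast path (again userspace C under the same illustrative assumptions; the blocked-counter and starved-list bookkeeping of the real function is elided):

#include <stdatomic.h>

struct sketch_target {
	int can_queue;
	atomic_int target_busy;
};

/* Returns 1 when a command may be issued, 0 when over the limit. */
int sketch_target_queue_ready(struct sketch_target *t)
{
	int busy;

	if (t->can_queue <= 0)
		return 1;	/* no limit: always ready, no atomic RMW */

	/* atomic_fetch_add returns the old value, mirroring
	 * atomic_inc_return(...) - 1 in the patch. */
	busy = atomic_fetch_add(&t->target_busy, 1);
	if (busy >= t->can_queue) {
		atomic_fetch_sub(&t->target_busy, 1);	/* the out_dec path */
		return 0;
	}
	return 1;
}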
@@ -1334,7 +1339,8 @@ starved:
 	list_move_tail(&sdev->starved_entry, &shost->starved_list);
 	spin_unlock_irq(shost->host_lock);
 out_dec:
-	atomic_dec(&starget->target_busy);
+	if (starget->can_queue > 0)
+		atomic_dec(&starget->target_busy);
 	return 0;
 }
 
@@ -1455,7 +1461,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	 */
 	atomic_inc(&sdev->device_busy);
 	atomic_inc(&shost->host_busy);
-	atomic_inc(&starget->target_busy);
+	if (starget->can_queue > 0)
+		atomic_inc(&starget->target_busy);
 
 	blk_complete_request(req);
 }
@@ -1624,7 +1631,8 @@ static void scsi_request_fn(struct request_queue *q)
 	return;
 
 host_not_ready:
-	atomic_dec(&scsi_target(sdev)->target_busy);
+	if (scsi_target(sdev)->can_queue > 0)
+		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
 	/*
 	 * lock q, handle tag, requeue req, and decrement device_busy. We
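Taken together, the increment sites (scsi_kill_request, scsi_target_queue_ready) and the decrement sites (scsi_device_unbusy, the out_dec path, the host_not_ready path in scsi_request_fn) are all guarded by the same can_queue > 0 test, so the counter stays balanced: a limited target sees a matching decrement for every increment, and an unlimited target never touches the atomic at all.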