@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
 	struct alias_pav_group *group;
 	struct dasd_uid uid;
 
+	spin_lock(get_ccwdev_lock(device->cdev));
 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
 	private->uid.base_unit_addr =
 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
 	uid = private->uid;
-
+	spin_unlock(get_ccwdev_lock(device->cdev));
 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
 	if (lcu->pav == NO_PAV) {
 		list_move(&device->alias_list, &lcu->active_devices);
 		return 0;
 	}
-
 	group = _find_group(lcu, &uid);
 	if (!group) {
 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
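
[Note, not part of the patch: with the hunk above, _add_device_to_lcu() holds the device's ccwdev lock only around the update of private->uid, snapshots the uid into a local variable, and does the PAV group handling on that copy with just the lcu lock held. A self-contained user-space sketch of that "update and snapshot under a short lock, then work on the copy" pattern follows; all names in it (struct uid, dev_lock, update_and_snapshot_uid) are invented for the illustration and are not driver code.]

#include <pthread.h>
#include <stdio.h>

struct uid {
	int type;
	int base_unit_addr;
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct uid shared_uid;		/* protected by dev_lock */

static struct uid update_and_snapshot_uid(int type, int base)
{
	struct uid copy;

	pthread_mutex_lock(&dev_lock);	/* critical section covers only ... */
	shared_uid.type = type;		/* ... the update and the copy      */
	shared_uid.base_unit_addr = base;
	copy = shared_uid;
	pthread_mutex_unlock(&dev_lock);

	return copy;			/* caller keeps working on the copy */
}

int main(void)
{
	struct uid uid = update_and_snapshot_uid(1, 0x42);

	printf("type=%d base=%#x\n", uid.type, uid.base_unit_addr);
	return 0;
}

[Keeping the device lock down to the two assignments and the copy means no other lock has to be held together with it for the rest of the function.]
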
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
 	return 0;
 }
 
-/*
- * This function tries to lock all devices on an lcu via trylock
- * return NULL on success otherwise return first failed device
- */
-static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
-						       struct dasd_device *pos)
-
-{
-	struct alias_pav_group *pavgroup;
-	struct dasd_device *device;
-
-	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-			return device;
-	}
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-			return device;
-	}
-	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-			if (device == pos)
-				continue;
-			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-				return device;
-		}
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-			if (device == pos)
-				continue;
-			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-				return device;
-		}
-	}
-	return NULL;
-}
-
-/*
- * unlock all devices except the one that is specified as pos
- * stop if enddev is specified and reached
- */
-static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
-				       struct dasd_device *pos,
-				       struct dasd_device *enddev)
-
-{
-	struct alias_pav_group *pavgroup;
-	struct dasd_device *device;
-
-	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (device == enddev)
-			return;
-		spin_unlock(get_ccwdev_lock(device->cdev));
-	}
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (device == enddev)
-			return;
-		spin_unlock(get_ccwdev_lock(device->cdev));
-	}
-	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-			if (device == pos)
-				continue;
-			if (device == enddev)
-				return;
-			spin_unlock(get_ccwdev_lock(device->cdev));
-		}
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-			if (device == pos)
-				continue;
-			if (device == enddev)
-				return;
-			spin_unlock(get_ccwdev_lock(device->cdev));
-		}
-	}
-}
-
-/*
- * this function is needed because the locking order
- * device lock -> lcu lock
- * needs to be assured when iterating over devices in an LCU
- *
- * if a device is specified in pos then the device lock is already hold
- */
-static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
-					  struct dasd_device *pos,
-					  unsigned long *flags)
-{
-	struct dasd_device *failed;
-
-	do {
-		spin_lock_irqsave(&lcu->lock, *flags);
-		failed = _trylock_all_devices_on_lcu(lcu, pos);
-		if (failed) {
-			_unlock_all_devices_on_lcu(lcu, pos, failed);
-			spin_unlock_irqrestore(&lcu->lock, *flags);
-			cpu_relax();
-		}
-	} while (failed);
-}
-
-static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
-				  struct dasd_device *pos)
-{
-	struct dasd_device *failed;
-
-	do {
-		spin_lock(&lcu->lock);
-		failed = _trylock_all_devices_on_lcu(lcu, pos);
-		if (failed) {
-			_unlock_all_devices_on_lcu(lcu, pos, failed);
-			spin_unlock(&lcu->lock);
-			cpu_relax();
-		}
-	} while (failed);
-}
-
 static int read_unit_address_configuration(struct dasd_device *device,
 					   struct alias_lcu *lcu)
 {
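
[Aside, not part of the patch: the helpers deleted above existed because the documented lock order was device lock before lcu lock, while the update paths want to take the lcu lock first. They worked around that by taking the lcu lock and then only trylocking every device lock on the LCU, dropping everything and retrying whenever one trylock failed, so no blocking acquisition ever violated the documented order. A standalone sketch of that trylock-and-retry idiom with two pthread mutexes follows; the names outer and inner are invented for the illustration.]

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* taken first here */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* nominally first in the lock order */

static void lock_both(void)
{
	for (;;) {
		pthread_mutex_lock(&outer);
		if (pthread_mutex_trylock(&inner) == 0)
			return;			/* both locks held */
		pthread_mutex_unlock(&outer);	/* contention: drop and retry */
		sched_yield();
	}
}

static void unlock_both(void)
{
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	lock_both();
	printf("holding both locks\n");
	unlock_both();
	return 0;
}

[With the rest of this patch the lcu lock is taken on its own and each ccwdev lock is only held briefly inside it, so the retry machinery can go.]
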
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 	if (rc)
 		return rc;
 
-	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+	spin_lock_irqsave(&lcu->lock, flags);
 	lcu->pav = NO_PAV;
 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
 		switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 			    alias_list) {
 		_add_device_to_lcu(lcu, device, refdev);
 	}
-	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 	return 0;
 }
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device)
 
 	lcu = private->lcu;
 	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	spin_lock(&lcu->lock);
+	spin_lock_irqsave(&lcu->lock, flags);
 	if (!(lcu->flags & UPDATE_PENDING)) {
 		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device)
 		list_move(&device->alias_list, &lcu->active_devices);
 		_schedule_lcu_update(lcu, device);
 	}
-	spin_unlock(&lcu->lock);
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	spin_unlock_irqrestore(&lcu->lock, flags);
 	return rc;
 }
 
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 
-	list_for_each_entry(device, &lcu->active_devices, alias_list)
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list)
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
 	}
 }
 
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 
-	list_for_each_entry(device, &lcu->active_devices, alias_list)
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list)
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
 	}
 }
 
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work)
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	reset_summary_unit_check(lcu, device, suc_data->reason);
 
-	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+	spin_lock_irqsave(&lcu->lock, flags);
 	_unstop_all_devices_on_lcu(lcu);
 	_restart_all_base_devices_on_lcu(lcu);
 	/* 3. read new alias configuration */
 	_schedule_lcu_update(lcu, device);
 	lcu->suc_data.device = NULL;
 	dasd_put_device(device);
-	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
-/*
- * note: this will be called from int handler context (cdev locked)
- */
-void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
-					  struct irb *irb)
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
 {
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  suc_work);
 	struct dasd_eckd_private *private = device->private;
 	struct alias_lcu *lcu;
-	char reason;
-	char *sense;
-
-	sense = dasd_get_sense(irb);
-	if (sense) {
-		reason = sense[8];
-		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
-			      "eckd handle summary unit check: reason", reason);
-	} else {
-		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-			      "eckd handle summary unit check:"
-			      " no reason code available");
-		return;
-	}
+	unsigned long flags;
 
 	lcu = private->lcu;
 	if (!lcu) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "device not ready to handle summary"
 			      " unit check (no lcu structure)");
-		return;
+		goto out;
 	}
-	_trylock_and_lock_lcu(lcu, device);
+	spin_lock_irqsave(&lcu->lock, flags);
 	/* If this device is about to be removed just return and wait for
 	 * the next interrupt on a different device
 	 */
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "device is in offline processing,"
 			      " don't do summary unit check handling");
-		_unlock_all_devices_on_lcu(lcu, device, NULL);
-		spin_unlock(&lcu->lock);
-		return;
+		goto out_unlock;
 	}
 	if (lcu->suc_data.device) {
 		/* already scheduled or running */
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "previous instance of summary unit check worker"
 			      " still pending");
-		_unlock_all_devices_on_lcu(lcu, device, NULL);
-		spin_unlock(&lcu->lock);
-		return ;
+		goto out_unlock;
 	}
 	_stop_all_devices_on_lcu(lcu);
 	/* prepare for lcu_update */
-	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
-	lcu->suc_data.reason = reason;
+	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+	lcu->suc_data.reason = private->suc_reason;
 	lcu->suc_data.device = device;
 	dasd_get_device(device);
-	_unlock_all_devices_on_lcu(lcu, device, NULL);
-	spin_unlock(&lcu->lock);
 	if (!schedule_work(&lcu->suc_data.worker))
 		dasd_put_device(device);
+out_unlock:
+	spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+	clear_bit(DASD_FLAG_SUC, &device->flags);
+	dasd_put_device(device);
 };
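
[Closing note, not part of the patch: dasd_alias_handle_summary_unit_check() becomes a work function driven by device->suc_work, the reason code is taken from private->suc_reason, and on every exit path the function clears DASD_FLAG_SUC and drops a device reference. Presumably the interrupt handler, which is not shown in this excerpt, sets the flag, saves the reason, takes the reference and schedules the work, so the lcu lock never has to be taken with the cdev lock already held. The reference handling around schedule_work() follows the usual take-before-schedule, drop-on-failure pattern; a standalone sketch of that pattern follows, with all names and the fake scheduler invented for the illustration.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
	atomic_int refcount;
};

static void get_device(struct device *dev)
{
	atomic_fetch_add(&dev->refcount, 1);
}

static void put_device(struct device *dev)
{
	/* atomic_fetch_sub() returns the old value: 1 means last reference */
	if (atomic_fetch_sub(&dev->refcount, 1) == 1) {
		printf("last reference dropped, freeing device\n");
		free(dev);
	}
}

static void *worker(void *arg)
{
	struct device *dev = arg;

	printf("worker runs with its own reference\n");
	put_device(dev);			/* reference taken by the scheduler */
	return NULL;
}

/* Stand-in for schedule_work(): may fail, e.g. if the work is already pending. */
static bool schedule_worker(pthread_t *tid, struct device *dev)
{
	return pthread_create(tid, NULL, worker, dev) == 0;
}

int main(void)
{
	struct device *dev = malloc(sizeof(*dev));
	pthread_t tid;

	atomic_init(&dev->refcount, 1);		/* caller's reference */

	get_device(dev);			/* reference for the worker */
	if (!schedule_worker(&tid, dev))
		put_device(dev);		/* scheduling failed: drop it again */
	else
		pthread_join(tid, NULL);

	put_device(dev);			/* caller's reference */
	return 0;
}
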
|