@@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
 	private->conf_data = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		private->path_conf_data[i] = NULL;
+		kfree(device->path[i].conf_data);
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 }
 
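This hunk and the following ones move the per-path configuration data out of the ECKD-private array private->path_conf_data[] and into a per-path array on struct dasd_device, where each entry now also caches the cssid, ssid and chpid of its channel path; path masks are no longer touched directly but only through dasd_path_* accessors. As an illustrative sketch only (the real definitions live in dasd_int.h and are not part of this patch; all example_* names are hypothetical), a per-path entry and the mask arithmetic behind such accessors could look like this:

/* Sketch, not the real dasd_int.h definitions: a per-path entry roughly as
 * this patch assumes it, and the bitmask arithmetic implied by helpers such
 * as dasd_path_add_opm()/dasd_path_get_opm(). */
struct example_dasd_path {
	void *conf_data;	/* struct dasd_conf_data * in dasd_eckd.c */
	unsigned char cssid;	/* channel subsystem image id */
	unsigned char ssid;	/* subchannel set id */
	unsigned char chpid;	/* channel path id */
};

/* opm/lpm are 8-bit path masks: bit 0x80 is path 0, bit 0x01 is path 7 */
static inline unsigned char example_add_to_mask(unsigned char opm,
						unsigned char lpm)
{
	return opm | lpm;	/* "adding" a path simply sets its mask bit */
}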
@@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	int rc, path_err, pos;
 	__u8 lpm, opm;
 	struct dasd_eckd_private *private, path_private;
-	struct dasd_path *path_data;
 	struct dasd_uid *uid;
 	char print_path_uid[60], print_device_uid[60];
+	struct channel_path_desc *chp_desc;
+	struct subchannel_id sch_id;
 
 	private = device->private;
-	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
+	ccw_device_get_schid(device->cdev, &sch_id);
 	conf_data_saved = 0;
 	path_err = 0;
 	/* get configuration data per operational path */
@@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 					"No configuration data "
 					"retrieved");
 			/* no further analysis possible */
-			path_data->opm |= lpm;
+			dasd_path_add_opm(device, opm);
 			continue;	/* no error */
 		}
 		/* save first valid configuration data */
@@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 			}
 			pos = pathmask_to_pos(lpm);
 			/* store per path conf_data */
-			private->path_conf_data[pos] =
-				(struct dasd_conf_data *) conf_data;
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
 			/*
 			 * build device UID that other path data
 			 * can be compared to it
@@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 					"device %s instead of %s\n", lpm,
 					print_path_uid, print_device_uid);
 				path_err = -EINVAL;
-				path_data->cablepm |= lpm;
+				dasd_path_add_cablepm(device, lpm);
 				continue;
 			}
 			pos = pathmask_to_pos(lpm);
 			/* store per path conf_data */
-			private->path_conf_data[pos] =
-				(struct dasd_conf_data *) conf_data;
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
 			path_private.conf_data = NULL;
 			path_private.conf_len = 0;
 		}
 		switch (dasd_eckd_path_access(conf_data, conf_len)) {
 		case 0x02:
-			path_data->npm |= lpm;
+			dasd_path_add_nppm(device, lpm);
 			break;
 		case 0x03:
-			path_data->ppm |= lpm;
+			dasd_path_add_ppm(device, lpm);
 			break;
 		}
-		if (!path_data->opm) {
-			path_data->opm = lpm;
+		if (!dasd_path_get_opm(device)) {
+			dasd_path_set_opm(device, lpm);
 			dasd_generic_path_operational(device);
 		} else {
-			path_data->opm |= lpm;
+			dasd_path_add_opm(device, lpm);
 		}
-		/*
-		 * if the path is used
-		 * it should not be in one of the negative lists
-		 */
-		path_data->cablepm &= ~lpm;
-		path_data->hpfpm &= ~lpm;
-		path_data->cuirpm &= ~lpm;
 	}
 
 	return path_err;
 }
 
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int fcx_in_css, fcx_in_gneq, fcx_in_features;
+	int tpm, mdc;
+
+	if (dasd_nofcx)
+		return 0;
+	/* is transport mode supported? */
+	fcx_in_css = css_general_characteristics.fcx;
+	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+	fcx_in_features = private->features.feature[40] & 0x80;
+	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+	if (!tpm)
+		return 0;
+
+	mdc = ccw_device_get_mdc(device->cdev, 0);
+	if (mdc < 0) {
+		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+		return 0;
+	} else {
+		return (u32)mdc * FCX_MAX_DATA_FACTOR;
+	}
+}
+
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
 	struct dasd_eckd_private *private = device->private;
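get_fcx_max_data() is moved ahead of its first new caller here (its old definition is deleted further down in this patch); it reports the transport-mode (High Performance FICON) per-request data limit by scaling the maximum data count returned by ccw_device_get_mdc(). A minimal illustration of that scaling follows; the 64 KiB factor is an assumption standing in for FCX_MAX_DATA_FACTOR, and example_fcx_limit() is hypothetical:

/* Hypothetical sketch of the mdc -> byte-limit conversion used above.
 * The 65536 factor is an assumption for FCX_MAX_DATA_FACTOR. */
static unsigned int example_fcx_limit(int mdc)
{
	const unsigned int factor = 65536;	/* assumed FCX_MAX_DATA_FACTOR */

	return mdc > 0 ? (unsigned int)mdc * factor : 0;	/* e.g. 16 -> 1 MiB */
}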
@@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
 			      struct path_verification_work_data *data)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct dasd_path *path_data = &device->path_data;
-	__u8 lpm, opm = path_data->opm;
+	__u8 lpm, opm = dasd_path_get_opm(device);
 	int rc = -ENODEV;
 
 	for (lpm = 0x80; lpm; lpm >>= 1) {
@@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
 		 * in other case the device UID may have changed and
 		 * the first working path UID will be used as device UID
 		 */
-		if (device->path_data.opm &&
+		if (dasd_path_get_opm(device) &&
 		    dasd_eckd_compare_path_uid(device, &path_private)) {
 			/*
 			 * the comparison was not successful
@@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
 		 * situation in dasd_start_IO.
 		 */
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		if (!device->path_data.opm && opm) {
-			device->path_data.opm = opm;
-			device->path_data.cablepm &= ~opm;
-			device->path_data.cuirpm &= ~opm;
-			device->path_data.hpfpm &= ~opm;
+		if (!dasd_path_get_opm(device) && opm) {
+			dasd_path_set_opm(device, opm);
 			dasd_generic_path_operational(device);
 		} else {
-			device->path_data.opm |= opm;
-			device->path_data.cablepm &= ~opm;
-			device->path_data.cuirpm &= ~opm;
-			device->path_data.hpfpm &= ~opm;
+			dasd_path_add_opm(device, opm);
 		}
-		device->path_data.npm |= npm;
-		device->path_data.ppm |= ppm;
-		device->path_data.tbvpm |= epm;
-		device->path_data.cablepm |= cablepm;
-		device->path_data.hpfpm |= hpfpm;
+		dasd_path_add_nppm(device, npm);
+		dasd_path_add_ppm(device, ppm);
+		dasd_path_add_tbvpm(device, epm);
+		dasd_path_add_cablepm(device, cablepm);
+		dasd_path_add_nohpfpm(device, hpfpm);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
 	return 0;
 }
 
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (!private->fcx_max_data)
+		private->fcx_max_data = get_fcx_max_data(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
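dasd_eckd_reset_path() above uses the GNU ?: extension with an omitted middle operand: pm ? : dasd_path_get_notoperpm(device) re-validates the given path mask when one was passed, and otherwise falls back to all currently non-operational paths. Spelled out as a small sketch (example_pick_mask() is hypothetical, only illustrating the expression):

/* Equivalent of the "pm ? : fallback" expression in dasd_eckd_reset_path();
 * the GNU extension evaluates pm once and uses it when it is non-zero. */
static inline unsigned char example_pick_mask(unsigned char pm,
					      unsigned char fallback)
{
	return pm ? pm : fallback;
}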
@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
 	dasd_put_device(device);
 }
 
-static u32 get_fcx_max_data(struct dasd_device *device)
-{
-	struct dasd_eckd_private *private = device->private;
-	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
-
-	if (dasd_nofcx)
-		return 0;
-	/* is transport mode supported? */
-	fcx_in_css = css_general_characteristics.fcx;
-	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
-	fcx_in_features = private->features.feature[40] & 0x80;
-	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-
-	if (!tpm)
-		return 0;
-
-	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
-		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
-			 " data size for zHPF requests failed\n");
-		return 0;
-	} else
-		return (u32)mdc * FCX_MAX_DATA_FACTOR;
-}
-
 /*
  * Check device characteristics.
  * If the device is accessible using ECKD discipline, the device is enabled.
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	if (rc)
 		goto out_err1;
 
-	/* set default timeout */
+	/* set some default values */
 	device->default_expires = DASD_EXPIRES;
-	/* set default retry count */
 	device->default_retries = DASD_RETRIES;
+	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+	device->path_interval = DASD_ECKD_PATH_INTERVAL;
 
 	if (private->gneq) {
 		value = 1;
@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
 	private->gneq = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		if ((__u8 *)private->path_conf_data[i] ==
+		kfree(device->path[i].conf_data);
+		if ((__u8 *)device->path[i].conf_data ==
 		    private->conf_data) {
 			private->conf_data = NULL;
 			private->conf_len = 0;
 		}
-		private->path_conf_data[i] = NULL;
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 	kfree(private->conf_data);
 	private->conf_data = NULL;
@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
 		cqr->startdev = cqr->block->base;
-		cqr->lpm = cqr->block->base->path_data.opm;
+		cqr->lpm = dasd_path_get_opm(cqr->block->base);
 	}
 };
 
@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
 		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
 		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
-		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+		       irb->scsw.tm.fcxs,
+		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
 		       req ? req->intrc : 0);
 	len += sprintf(page + len, PRINTK_HEADER
 		       " device %s: Failing TCW: %p\n",
@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
  */
 static int
 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
-			    __u32 message_id,
-			    struct channel_path_desc *desc,
-			    struct subchannel_id sch_id)
+			    __u32 message_id, __u8 lpum)
 {
 	struct dasd_psf_cuir_response *psf_cuir;
+	int pos = pathmask_to_pos(lpum);
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	int rc;
@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
 	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
 	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
 	psf_cuir->cc = response;
-	if (desc)
-		psf_cuir->chpid = desc->chpid;
+	psf_cuir->chpid = device->path[pos].chpid;
 	psf_cuir->message_id = message_id;
-	psf_cuir->cssid = sch_id.cssid;
-	psf_cuir->ssid = sch_id.ssid;
+	psf_cuir->cssid = device->path[pos].cssid;
+	psf_cuir->ssid = device->path[pos].ssid;
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
 	ccw->cda = (__u32)(addr_t)psf_cuir;
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
 						      __u8 lpum,
 						      struct dasd_cuir_message *cuir)
 {
-	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *conf_data;
 	int path, pos;
 
 	if (cuir->record_selector == 0)
 		goto out;
 	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
-		conf_data = private->path_conf_data[pos];
+		conf_data = device->path[pos].conf_data;
 		if (conf_data->gneq.record_selector ==
 		    cuir->record_selector)
 			return conf_data;
 	}
 out:
-	return private->path_conf_data[pathmask_to_pos(lpum)];
+	return device->path[pathmask_to_pos(lpum)].conf_data;
 }
 
 /*
@@ -5391,7 +5406,6 @@ out:
 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 				struct dasd_cuir_message *cuir)
 {
-	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *ref_conf_data;
 	unsigned long bitmask = 0, mask = 0;
 	struct dasd_conf_data *conf_data;
@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 	mask |= cuir->neq_map[1] << 8;
 	mask |= cuir->neq_map[0] << 16;
 
-	for (path = 0x80; path; path >>= 1) {
+	for (path = 0; path < 8; path++) {
 		/* initialise data per path */
 		bitmask = mask;
-		pos = pathmask_to_pos(path);
-		conf_data = private->path_conf_data[pos];
+		conf_data = device->path[path].conf_data;
 		pos = 8 - ffs(cuir->ned_map);
 		ned = (char *) &conf_data->neds[pos];
 		/* compare reference ned and per path ned */
@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 			continue;
 		/* device and path match the reference values
 		   add path to CUIR scope */
-		tbcpm |= path;
+		tbcpm |= 0x80 >> path;
 	}
 	return tbcpm;
 }
 
 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
-				       unsigned long paths,
-				       struct subchannel_id sch_id, int action)
+				       unsigned long paths, int action)
 {
-	struct channel_path_desc *desc;
 	int pos;
 
 	while (paths) {
 		/* get position of bit in mask */
-		pos = ffs(paths) - 1;
+		pos = 8 - ffs(paths);
 		/* get channel path descriptor from this position */
-		desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
 		if (action == CUIR_QUIESCE)
-			pr_warn("Service on the storage server caused path "
-				"%x.%02x to go offline", sch_id.cssid,
-				desc ? desc->chpid : 0);
+			pr_warn("Service on the storage server caused path %x.%02x to go offline",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
 		else if (action == CUIR_RESUME)
-			pr_info("Path %x.%02x is back online after service "
-				"on the storage server", sch_id.cssid,
-				desc ? desc->chpid : 0);
-		kfree(desc);
-		clear_bit(pos, &paths);
+			pr_info("Path %x.%02x is back online after service on the storage server",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
+		clear_bit(7 - pos, &paths);
 	}
 }
 
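The CUIR hunks above switch from iterating a path mask to iterating array positions 0-7: a position is converted back into a mask bit with 0x80 >> path, dasd_eckd_cuir_notify_user() derives the position from the lowest set bit of the mask with 8 - ffs(paths), and clears that bit again via clear_bit(7 - pos, &paths). A small sketch of the mask/position arithmetic, assuming (as elsewhere in the driver) that mask bit 0x80 corresponds to position 0; the example_* helpers are hypothetical:

#include <strings.h>	/* userspace ffs(); the kernel provides its own */

/* position selected from the lowest set bit of an 8-bit path mask */
static int example_mask_to_pos(unsigned int paths)
{
	return 8 - ffs(paths);		/* 0x80 -> 0, 0x01 -> 7 */
}

/* back from an array position to the corresponding mask bit */
static unsigned char example_pos_to_mask(int pos)
{
	return 0x80 >> pos;		/* 0 -> 0x80, 7 -> 0x01 */
}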
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
 
 	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
 	/* nothing to do if path is not in use */
-	if (!(device->path_data.opm & tbcpm))
+	if (!(dasd_path_get_opm(device) & tbcpm))
 		return 0;
-	if (!(device->path_data.opm & ~tbcpm)) {
+	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
 		/* no path would be left if the CUIR action is taken
 		   return error */
 		return -EINVAL;
 	}
 	/* remove device from operational path mask */
-	device->path_data.opm &= ~tbcpm;
-	device->path_data.cuirpm |= tbcpm;
+	dasd_path_remove_opm(device, tbcpm);
+	dasd_path_add_cuirpm(device, tbcpm);
 	return tbcpm;
 }
 
|
@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
|
|
|
* notify the already set offline devices again
|
|
|
*/
|
|
|
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
|
|
|
- struct subchannel_id sch_id,
|
|
|
struct dasd_cuir_message *cuir)
|
|
|
{
|
|
|
struct dasd_eckd_private *private = device->private;
|
|
@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
|
|
|
}
|
|
|
}
|
|
|
/* notify user about all paths affected by CUIR action */
|
|
|
- dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
|
|
|
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
|
|
|
return 0;
|
|
|
out_err:
|
|
|
return tbcpm;
|
|
|
}
|
|
|
|
|
|
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
|
|
|
- struct subchannel_id sch_id,
|
|
|
struct dasd_cuir_message *cuir)
|
|
|
{
|
|
|
struct dasd_eckd_private *private = device->private;
|
|
@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
|
|
|
alias_list) {
|
|
|
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
|
|
|
paths |= tbcpm;
|
|
|
- if (!(dev->path_data.opm & tbcpm)) {
|
|
|
- dev->path_data.tbvpm |= tbcpm;
|
|
|
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
|
|
|
+ dasd_path_add_tbvpm(dev, tbcpm);
|
|
|
dasd_schedule_device_bh(dev);
|
|
|
}
|
|
|
}
|
|
@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 				 alias_list) {
 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 		paths |= tbcpm;
-		if (!(dev->path_data.opm & tbcpm)) {
-			dev->path_data.tbvpm |= tbcpm;
+		if (!(dasd_path_get_opm(dev) & tbcpm)) {
+			dasd_path_add_tbvpm(dev, tbcpm);
 			dasd_schedule_device_bh(dev);
 		}
 	}
@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 					 alias_list) {
 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 			paths |= tbcpm;
-			if (!(dev->path_data.opm & tbcpm)) {
-				dev->path_data.tbvpm |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
 				dasd_schedule_device_bh(dev);
 			}
 		}
@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 					 alias_list) {
 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 			paths |= tbcpm;
-			if (!(dev->path_data.opm & tbcpm)) {
-				dev->path_data.tbvpm |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
 				dasd_schedule_device_bh(dev);
 			}
 		}
 	}
 	/* notify user about all paths affected by CUIR action */
-	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
 	return 0;
 }
 
@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
 				  __u8 lpum)
 {
 	struct dasd_cuir_message *cuir = messages;
-	struct channel_path_desc *desc;
-	struct subchannel_id sch_id;
-	int pos, response;
+	int response;
 
 	DBF_DEV_EVENT(DBF_WARNING, device,
 		      "CUIR request: %016llx %016llx %016llx %08x",
 		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
 		      ((u32 *)cuir)[3]);
-	ccw_device_get_schid(device->cdev, &sch_id);
-	pos = pathmask_to_pos(lpum);
-	desc = ccw_device_get_chp_desc(device->cdev, pos);
 
 	if (cuir->code == CUIR_QUIESCE) {
 		/* quiesce */
-		if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
 			response = PSF_CUIR_LAST_PATH;
 		else
 			response = PSF_CUIR_COMPLETED;
 	} else if (cuir->code == CUIR_RESUME) {
 		/* resume */
-		dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+		dasd_eckd_cuir_resume(device, lpum, cuir);
 		response = PSF_CUIR_COMPLETED;
 	} else
 		response = PSF_CUIR_NOT_SUPPORTED;
 
 	dasd_eckd_psf_cuir_response(device, response,
-				    cuir->message_id, desc, sch_id);
+				    cuir->message_id, lpum);
 	DBF_DEV_EVENT(DBF_WARNING, device,
 		      "CUIR response: %d on message ID %08x", response,
 		      cuir->message_id);
-	/* free descriptor copy */
-	kfree(desc);
 	/* to make sure there is no attention left schedule work again */
 	device->discipline->check_attention(device, lpum);
 }
@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
 	return 0;
 }
 
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+	if (~lpum & dasd_path_get_opm(device)) {
+		dasd_path_add_nohpfpm(device, lpum);
+		dasd_path_remove_opm(device, lpum);
+		dev_err(&device->cdev->dev,
+			"Channel path %02X lost HPF functionality and is disabled\n",
+			lpum);
+		return 1;
+	}
+	return 0;
+}
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	dev_err(&device->cdev->dev,
+		"High Performance FICON disabled\n");
+	private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+				       struct irb *irb)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	if (!private->fcx_max_data) {
+		/* sanity check for no HPF, the error makes no sense */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Trying to disable HPF for a non HPF device");
+		return;
+	}
+	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+		dasd_eckd_disable_hpf_device(device);
+	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+			return;
+		dasd_eckd_disable_hpf_device(device);
+		dasd_path_set_tbvpm(device,
+				    dasd_path_get_hpfpm(device));
+	}
+	/*
+	 * prevent that any new I/O is started on the device and schedule a
+	 * requeue of existing requests
+	 */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	dasd_schedule_requeue(device);
+}
+
 static struct ccw_driver dasd_eckd_driver = {
 	.driver = {
 		.name = "dasd-eckd",
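In dasd_eckd_disable_hpf_path() above, the condition ~lpum & dasd_path_get_opm(device) asks whether any operational path other than the failing one remains; only then is that single path removed from the operational mask, otherwise the whole device falls back to command mode via dasd_eckd_disable_hpf_device(). A small illustration of that check (example_other_paths_left() is hypothetical):

/* Illustration of the "~lpum & opm" test: true when at least one operational
 * path other than lpum is left. */
static int example_other_paths_left(unsigned char lpum, unsigned char opm)
{
	/* lpum = 0x40, opm = 0xc0: ~0x40 & 0xc0 = 0x80 -> another path exists */
	/* lpum = 0x40, opm = 0x40: ~0x40 & 0x40 = 0x00 -> lpum was the last one */
	return (unsigned char)(~lpum & opm) != 0;
}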
@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
 	.check_attention = dasd_eckd_check_attention,
 	.host_access_count = dasd_eckd_host_access_count,
 	.hosts_print = dasd_hosts_print,
+	.handle_hpf_error = dasd_eckd_handle_hpf_error,
+	.disable_hpf = dasd_eckd_disable_hpf_device,
+	.hpf_enabled = dasd_eckd_hpf_enabled,
+	.reset_path = dasd_eckd_reset_path,
 };
 
 static int __init