s390/dasd: add support for control unit initiated reconfiguration

Add support for Control Unit Initiated Reconfiguration (CUIR) to
Linux. CUIR is a storage server interface used to reconcile
concurrent hardware changes between the storage server and the host.

Reviewed-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Stefan Haberland <stefan.haberland@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Author:    Stefan Haberland <stefan.haberland@de.ibm.com>  2014-10-01 14:39:47 +02:00
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 5db8440c36
parent 362ce84f43
5 changed files with 472 additions and 5 deletions
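As an aid to reading the diff, here is a minimal, self-contained userspace sketch of the quiesce/resume decision the patch implements in dasd_eckd_cuir_change_state() and dasd_eckd_cuir_resume(). The struct and function names (fake_path_data, handle_cuir) are invented for illustration; only the CUIR codes and PSF response codes match the definitions added in dasd_eckd.h below.

/*
 * Illustrative userspace model of the CUIR handling added by this patch.
 * The mask handling mirrors dasd_eckd_cuir_change_state() and
 * dasd_eckd_cuir_resume(); everything else is simplified.
 */
#include <stdio.h>

#define CUIR_QUIESCE            0x01
#define CUIR_RESUME             0x02

#define PSF_CUIR_COMPLETED      0x01
#define PSF_CUIR_NOT_SUPPORTED  0x02
#define PSF_CUIR_LAST_PATH      0x05

struct fake_path_data {             /* stand-in for struct dasd_path */
        unsigned char opm;          /* paths currently in use */
        unsigned char cuirpm;       /* paths varied offline by CUIR */
        unsigned char tbvpm;        /* paths to be verified */
};

static int handle_cuir(struct fake_path_data *pd, unsigned char lpum, int code)
{
        if (code == CUIR_QUIESCE) {
                if (!(pd->opm & ~lpum))         /* would lose the last path */
                        return PSF_CUIR_LAST_PATH;
                pd->opm &= ~lpum;               /* stop using the path ... */
                pd->cuirpm |= lpum;             /* ... and remember why */
                return PSF_CUIR_COMPLETED;
        }
        if (code == CUIR_RESUME) {
                if (!(pd->opm & lpum))          /* not in use yet */
                        pd->tbvpm |= lpum;      /* schedule path verification */
                return PSF_CUIR_COMPLETED;
        }
        return PSF_CUIR_NOT_SUPPORTED;
}

int main(void)
{
        struct fake_path_data pd = { .opm = 0xc0 }; /* two paths in use */
        int resp;

        resp = handle_cuir(&pd, 0x80, CUIR_QUIESCE);
        printf("quiesce 0x80 -> response %#x, opm now %#x\n", resp, pd.opm);

        resp = handle_cuir(&pd, 0x40, CUIR_QUIESCE);
        printf("quiesce 0x40 -> response %#x (last used path)\n", resp);

        resp = handle_cuir(&pd, 0x80, CUIR_RESUME);
        printf("resume  0x80 -> response %#x, tbvpm now %#x\n", resp, pd.tbvpm);
        return 0;
}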

drivers/s390/block/dasd.c

@@ -1660,6 +1660,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
device->discipline->check_for_device_change(device, cqr, irb);
dasd_put_device(device);
}
/* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
device->discipline->check_attention(device, irb->esw.esw1.lpum);
dasd_put_device(device);
}
}
if (!cqr)
return;

drivers/s390/block/dasd_devmap.c

@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev,
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
u8 opm, nppm, cablepm, cuirpm, hpfpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
opm = device->path_data.opm;
nppm = device->path_data.npm;
cablepm = device->path_data.cablepm;
cuirpm = device->path_data.cuirpm;
hpfpm = device->path_data.hpfpm;
dasd_put_device(device);
return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
cablepm, cuirpm, hpfpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
&dev_attr_path_masks.attr,
NULL,
};
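The new path_masks attribute is read-only and prints five hexadecimal masks in the order opm, npm, cablepm, cuirpm, hpfpm (see dasd_pm_show() above). A hypothetical userspace reader could look like the sketch below; the bus ID 0.0.4711 is an example only.

/* Hypothetical reader for the path_masks attribute added above. */
#include <stdio.h>

int main(void)
{
        /* example bus ID; real devices live under /sys/bus/ccw/devices/ */
        const char *attr = "/sys/bus/ccw/devices/0.0.4711/path_masks";
        unsigned int opm, npm, cablepm, cuirpm, hpfpm;
        FILE *f = fopen(attr, "r");

        if (!f) {
                perror(attr);
                return 1;
        }
        /* format written by dasd_pm_show(): "%02x %02x %02x %02x %02x\n" */
        if (fscanf(f, "%x %x %x %x %x",
                   &opm, &npm, &cablepm, &cuirpm, &hpfpm) == 5)
                printf("opm=%02x npm=%02x cablepm=%02x cuirpm=%02x hpfpm=%02x\n",
                       opm, npm, cablepm, cuirpm, hpfpm);
        fclose(f);
        return 0;
}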

drivers/s390/block/dasd_eckd.c

@@ -29,6 +29,8 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
@@ -112,6 +114,12 @@ struct path_verification_work_data {
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);
struct check_attention_work_data {
struct work_struct worker;
struct dasd_device *device;
__u8 lpum;
};
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
@@ -1126,6 +1134,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
path_err = -EINVAL;
path_data->cablepm |= lpm;
continue;
}
@@ -1141,6 +1150,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
break;
}
path_data->opm |= lpm;
/*
* if the path is used
* it should not be in one of the negative lists
*/
path_data->cablepm &= ~lpm;
path_data->hpfpm &= ~lpm;
path_data->cuirpm &= ~lpm;
if (conf_data != private->conf_data)
kfree(conf_data);
@@ -1230,7 +1246,7 @@ static void do_path_verification_work(struct work_struct *work)
struct dasd_eckd_private path_private;
struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm;
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
unsigned long flags;
char print_uid[60];
int rc;
@@ -1248,6 +1264,9 @@ static void do_path_verification_work(struct work_struct *work)
npm = 0;
ppm = 0;
epm = 0;
hpfpm = 0;
cablepm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & data->tbvpm))
continue;
@@ -1289,6 +1308,7 @@ static void do_path_verification_work(struct work_struct *work)
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
hpfpm |= lpm;
continue;
}
@@ -1350,6 +1370,7 @@ static void do_path_verification_work(struct work_struct *work)
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
cablepm |= lpm;
continue;
}
}
@@ -1364,12 +1385,21 @@ static void do_path_verification_work(struct work_struct *work)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->path_data.opm && opm) {
device->path_data.opm = opm;
device->path_data.cablepm &= ~opm;
device->path_data.cuirpm &= ~opm;
device->path_data.hpfpm &= ~opm;
dasd_generic_path_operational(device);
} else
} else {
device->path_data.opm |= opm;
device->path_data.cablepm &= ~opm;
device->path_data.cuirpm &= ~opm;
device->path_data.hpfpm &= ~opm;
}
device->path_data.npm |= npm;
device->path_data.ppm |= ppm;
device->path_data.tbvpm |= epm;
device->path_data.cablepm |= cablepm;
device->path_data.hpfpm |= hpfpm;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
@@ -4475,6 +4505,343 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
return -1;
}
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
struct dasd_rssd_messages *messages,
__u8 lpum)
{
struct dasd_rssd_messages *message_buf;
struct dasd_psf_prssd_data *prssdp;
struct dasd_eckd_private *private;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
private = (struct dasd_eckd_private *) device->private;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_messages)),
device);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* we need to check for messages on exactly this path */
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
cqr->lpm = lpum;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x03; /* Message Buffer */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)(addr_t) prssdp;
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)(addr_t) message_buf;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
message_buf = (struct dasd_rssd_messages *)
(prssdp + 1);
memcpy(messages, message_buf,
sizeof(struct dasd_rssd_messages));
} else
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading messages failed with rc=%d\n", rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Perform Subsystem Function - CUIR response
*/
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
__u32 message_id,
struct channel_path_desc *desc,
struct subchannel_id sch_id)
{
struct dasd_psf_cuir_response *psf_cuir;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_cuir_response),
device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-CUIR request");
return PTR_ERR(cqr);
}
psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
psf_cuir->cc = response;
if (desc)
psf_cuir->chpid = desc->chpid;
psf_cuir->message_id = message_id;
psf_cuir->cssid = sch_id.cssid;
psf_cuir->ssid = sch_id.ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_cuir;
ccw->count = sizeof(struct dasd_psf_cuir_response);
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_cuir_change_state(struct dasd_device *device, __u8 lpum)
{
unsigned long flags;
__u8 tbcpm;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
tbcpm = device->path_data.opm & ~lpum;
if (tbcpm) {
device->path_data.opm = tbcpm;
device->path_data.cuirpm |= lpum;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return tbcpm ? 0 : PSF_CUIR_LAST_PATH;
}
/*
* walk through all devices and quiesce them;
* if it is the last path, return an error
*
* if only part of the devices are quiesced and an error
* occurs, no onlining is necessary; the storage server will
* notify the already quiesced devices again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
struct channel_path_desc *desc,
struct subchannel_id sch_id)
{
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
int rc;
private = (struct dasd_eckd_private *) device->private;
rc = 0;
/* active devices */
list_for_each_entry_safe(dev, n,
&private->lcu->active_devices,
alias_list) {
rc = dasd_eckd_cuir_change_state(dev, lpum);
if (rc)
goto out;
}
/* inactive devices */
list_for_each_entry_safe(dev, n,
&private->lcu->inactive_devices,
alias_list) {
rc = dasd_eckd_cuir_change_state(dev, lpum);
if (rc)
goto out;
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist, group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
rc = dasd_eckd_cuir_change_state(dev, lpum);
if (rc)
goto out;
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
rc = dasd_eckd_cuir_change_state(dev, lpum);
if (rc)
goto out;
}
}
pr_warn("Service on the storage server caused path %x.%02x to go offline",
sch_id.cssid, desc ? desc->chpid : 0);
rc = PSF_CUIR_COMPLETED;
out:
return rc;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
struct channel_path_desc *desc,
struct subchannel_id sch_id)
{
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
pr_info("Path %x.%02x is back online after service on the storage server",
sch_id.cssid, desc ? desc->chpid : 0);
private = (struct dasd_eckd_private *) device->private;
/*
* the path may have been added through a generic path event before;
* only trigger path verification if the path is not already in use
*/
list_for_each_entry_safe(dev, n,
&private->lcu->active_devices,
alias_list) {
if (!(dev->path_data.opm & lpum)) {
dev->path_data.tbvpm |= lpum;
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&private->lcu->inactive_devices,
alias_list) {
if (!(dev->path_data.opm & lpum)) {
dev->path_data.tbvpm |= lpum;
dasd_schedule_device_bh(dev);
}
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
group) {
list_for_each_entry_safe(dev, n,
&pavgroup->baselist,
alias_list) {
if (!(dev->path_data.opm & lpum)) {
dev->path_data.tbvpm |= lpum;
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&pavgroup->aliaslist,
alias_list) {
if (!(dev->path_data.opm & lpum)) {
dev->path_data.tbvpm |= lpum;
dasd_schedule_device_bh(dev);
}
}
}
return PSF_CUIR_COMPLETED;
}
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_cuir_message *cuir = messages;
struct channel_path_desc *desc;
struct subchannel_id sch_id;
int pos, response;
ccw_device_get_schid(device->cdev, &sch_id);
/* get position of path in mask */
pos = 8 - ffs(lpum);
/* get channel path descriptor from this position */
desc = ccw_device_get_chp_desc(device->cdev, pos);
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
response = dasd_eckd_cuir_quiesce(device, lpum, desc, sch_id);
} else if (cuir->code == CUIR_RESUME) {
/* resume */
response = dasd_eckd_cuir_resume(device, lpum, desc, sch_id);
} else
response = PSF_CUIR_NOT_SUPPORTED;
dasd_eckd_psf_cuir_response(device, response, cuir->message_id,
desc, sch_id);
/* free descriptor copy */
kfree(desc);
}
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
struct check_attention_work_data *data;
struct dasd_rssd_messages *messages;
struct dasd_device *device;
int rc;
data = container_of(work, struct check_attention_work_data, worker);
device = data->device;
messages = kzalloc(sizeof(*messages), GFP_KERNEL);
if (!messages) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate attention message buffer");
goto out;
}
rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
if (rc)
goto out;
if (messages->length == ATTENTION_LENGTH_CUIR &&
messages->format == ATTENTION_FORMAT_CUIR)
dasd_eckd_handle_cuir(device, messages, data->lpum);
out:
dasd_put_device(device);
kfree(messages);
kfree(data);
}
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
struct check_attention_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
dasd_get_device(device);
data->device = device;
data->lpum = lpum;
schedule_work(&data->worker);
return 0;
}
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
@@ -4539,6 +4906,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
.check_attention = dasd_eckd_check_attention,
};
static int __init
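For reference, the lpum passed to check_attention() has exactly one bit set, with path position 0 in the most significant bit, so dasd_eckd_handle_cuir() converts it to a channel-path position with 8 - ffs(lpum), and a quiesce may only proceed while opm & ~lpum is non-zero. Below is a small standalone check of that arithmetic, assuming the same bit layout; it is an illustration, not part of the patch.

/* Quick standalone check of the lpum arithmetic used above. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned char opm = 0x80;       /* only the path in position 0 is in use */
        unsigned char lpum;
        int pos;

        for (lpum = 0x80; lpum; lpum >>= 1) {
                pos = 8 - ffs(lpum);    /* position 0 == bit 0x80 */
                printf("lpum %#04x -> chp position %d, %s\n", lpum, pos,
                       (opm & lpum) && !(opm & ~lpum) ?
                       "last operational path" : "not the last operational path");
        }
        return 0;
}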

drivers/s390/block/dasd_eckd.h

@@ -51,8 +51,35 @@
/*
* Perform Subsystem Function / Sub-Orders
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
#define PSF_ORDER_SSC 0x1D
/*
* CUIR response condition codes
*/
#define PSF_CUIR_INVALID 0x00
#define PSF_CUIR_COMPLETED 0x01
#define PSF_CUIR_NOT_SUPPORTED 0x02
#define PSF_CUIR_ERROR_IN_REQ 0x03
#define PSF_CUIR_DENIED 0x04
#define PSF_CUIR_LAST_PATH 0x05
#define PSF_CUIR_DEVICE_ONLINE 0x06
#define PSF_CUIR_VARY_FAILURE 0x07
#define PSF_CUIR_SOFTWARE_FAILURE 0x08
#define PSF_CUIR_NOT_RECOGNIZED 0x09
/*
* CUIR codes
*/
#define CUIR_QUIESCE 0x01
#define CUIR_RESUME 0x02
/*
* attention message definitions
*/
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
@@ -342,6 +369,38 @@ struct dasd_rssd_features {
char feature[256];
} __attribute__((packed));
struct dasd_rssd_messages {
__u16 length;
__u8 format;
__u8 code;
__u32 message_id;
__u8 flags;
char messages[4087];
} __packed;
struct dasd_cuir_message {
__u16 length;
__u8 format;
__u8 code;
__u32 message_id;
__u8 flags;
__u8 neq_map[3];
__u8 ned_map;
__u8 record_selector;
} __packed;
struct dasd_psf_cuir_response {
__u8 order;
__u8 flags;
__u8 cc;
__u8 chpid;
__u16 device_nr;
__u16 reserved;
__u32 message_id;
__u64 system_id;
__u8 cssid;
__u8 ssid;
} __packed;
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data

drivers/s390/block/dasd_int.h

@@ -357,6 +357,7 @@ struct dasd_discipline {
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
int (*check_attention)(struct dasd_device *, __u8);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -382,6 +383,10 @@ struct dasd_path {
__u8 tbvpm;
__u8 ppm;
__u8 npm;
/* paths that are not used because of a special condition */
__u8 cablepm; /* mis-cabled */
__u8 hpfpm; /* the HPF requirements of the other paths are not met */
__u8 cuirpm; /* CUIR varied offline */
};
struct dasd_profile_info {
@@ -501,7 +506,10 @@ struct dasd_block {
struct dasd_profile profile;
};
struct dasd_attention_data {
struct dasd_device *device;
__u8 lpum;
};
/* reasons why device (ccw_device_start) was stopped */
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
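The three new masks added to struct dasd_path are kept disjoint from opm: as soon as a path is (re)accepted as operational it is removed from cablepm, cuirpm and hpfpm (see dasd_eckd_read_conf() and do_path_verification_work() above). The following is a minimal model of that bookkeeping; the helper names are illustrative, only the mask logic follows the patch.

/*
 * Minimal model of the opm/cablepm/cuirpm/hpfpm bookkeeping; field and
 * helper names are illustrative, only the mask logic follows the patch.
 */
#include <assert.h>
#include <stdio.h>

struct path_masks {
        unsigned char opm;      /* operational paths */
        unsigned char cablepm;  /* mis-cabled paths */
        unsigned char cuirpm;   /* paths varied offline via CUIR */
        unsigned char hpfpm;    /* paths failing the HPF requirements */
};

/* a path that turns out to be usable leaves all "negative" masks */
static void set_operational(struct path_masks *pm, unsigned char lpm)
{
        pm->opm |= lpm;
        pm->cablepm &= ~lpm;
        pm->cuirpm &= ~lpm;
        pm->hpfpm &= ~lpm;
}

/* a path quiesced by CUIR leaves opm and is remembered in cuirpm */
static void set_cuir_offline(struct path_masks *pm, unsigned char lpm)
{
        pm->opm &= ~lpm;
        pm->cuirpm |= lpm;
}

int main(void)
{
        struct path_masks pm = { .opm = 0xc0 };

        set_cuir_offline(&pm, 0x40);
        set_operational(&pm, 0x40);     /* path came back and verified fine */

        /* opm and the negative masks never overlap */
        assert((pm.opm & (pm.cablepm | pm.cuirpm | pm.hpfpm)) == 0);
        printf("opm=%02x cuirpm=%02x\n", pm.opm, pm.cuirpm);
        return 0;
}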