Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--  drivers/s390/cio/chsc.c  1024
1 file changed, 267 insertions(+), 757 deletions(-)
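
This commit reworks chsc.c to address channel paths through struct chp_id (a cssid/id pair from asm/chpid.h) instead of a bare integer chpid, and moves the channel-path sysfs and measurement code out to chp.c. For orientation, a minimal standalone sketch of that pattern follows; the struct layout and helpers are simplified stand-ins written for this example, not the real asm/chpid.h definitions.

	#include <stdio.h>
	#include <string.h>

	struct chp_id {
		unsigned char cssid;	/* channel-subsystem image ID */
		unsigned char id;	/* channel-path ID within that image */
	};

	static void chp_id_init(struct chp_id *chpid)
	{
		memset(chpid, 0, sizeof(*chpid));	/* cssid 0 = local css image */
	}

	static int chp_id_is_equal(const struct chp_id *a, const struct chp_id *b)
	{
		return a->cssid == b->cssid && a->id == b->id;
	}

	int main(void)
	{
		struct chp_id chpid;

		chp_id_init(&chpid);
		chpid.id = 0x4a;
		/* Same "%x.%02x" formatting the patch uses in dbf/printk messages. */
		printf("chp%x.%02x\n", chpid.cssid, chpid.id);
		return chp_id_is_equal(&chpid, &chpid) ? 0 : 1;
	}

Built stand-alone, this prints "chp0.4a", mirroring the "cssid.id" notation the patch introduces for debug and log messages.
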
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e381..ea92ac4d657 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
#include <linux/device.h>
#include <asm/cio.h>
+#include <asm/chpid.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
+#include "chp.h"
#include "chsc.h"
static void *sei_page;
-static int new_channel_path(int chpid);
-
-static inline void
-set_chp_logically_online(int chp, int onoff)
-{
- css[0]->chps[chp]->state = onoff;
-}
-
-static int
-get_chp_status(int chp)
-{
- return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
-}
-
-void
-chsc_validate_chpids(struct subchannel *sch)
-{
- int mask, chp;
-
- for (chp = 0; chp <= 7; chp++) {
- mask = 0x80 >> chp;
- if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
- /* disable using this path */
- sch->opm &= ~mask;
- }
-}
-
-void
-chpid_is_actually_online(int chp)
-{
- int state;
-
- state = get_chp_status(chp);
- if (state < 0) {
- need_rescan = 1;
- queue_work(slow_path_wq, &slow_path_work);
- } else
- WARN_ON(!state);
-}
+struct chsc_ssd_area {
+ struct chsc_header request;
+ u16 :10;
+ u16 ssid:2;
+ u16 :4;
+ u16 f_sch; /* first subchannel */
+ u16 :16;
+ u16 l_sch; /* last subchannel */
+ u32 :32;
+ struct chsc_header response;
+ u32 :32;
+ u8 sch_valid : 1;
+ u8 dev_valid : 1;
+ u8 st : 3; /* subchannel type */
+ u8 zeroes : 3;
+ u8 unit_addr; /* unit address */
+ u16 devno; /* device number */
+ u8 path_mask;
+ u8 fla_valid_mask;
+ u16 sch; /* subchannel */
+ u8 chpid[8]; /* chpids 0-7 */
+ u16 fla[8]; /* full link addresses 0-7 */
+} __attribute__ ((packed));
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- * process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- int ccode, j;
-
- struct {
- struct chsc_header request;
- u16 reserved1a:10;
- u16 ssid:2;
- u16 reserved1b:4;
- u16 f_sch; /* first subchannel */
- u16 reserved2;
- u16 l_sch; /* last subchannel */
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 sch_valid : 1;
- u8 dev_valid : 1;
- u8 st : 3; /* subchannel type */
- u8 zeroes : 3;
- u8 unit_addr; /* unit address */
- u16 devno; /* device number */
- u8 path_mask;
- u8 fla_valid_mask;
- u16 sch; /* subchannel */
- u8 chpid[8]; /* chpids 0-7 */
- u16 fla[8]; /* full link addresses 0-7 */
- } __attribute__ ((packed)) *ssd_area;
-
- ssd_area = page;
+ unsigned long page;
+ struct chsc_ssd_area *ssd_area;
+ int ccode;
+ int ret;
+ int i;
+ int mask;
+ page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
+ return -ENOMEM;
+ ssd_area = (struct chsc_ssd_area *) page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
-
- ssd_area->ssid = sch->schid.ssid;
- ssd_area->f_sch = sch->schid.sch_no;
- ssd_area->l_sch = sch->schid.sch_no;
+ ssd_area->ssid = schid.ssid;
+ ssd_area->f_sch = schid.sch_no;
+ ssd_area->l_sch = schid.sch_no;
ccode = chsc(ssd_area);
+ /* Check response. */
if (ccode > 0) {
- pr_debug("chsc returned with ccode = %d\n", ccode);
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out_free;
}
-
- switch (ssd_area->response.code) {
- case 0x0001: /* everything ok */
- break;
- case 0x0002:
- CIO_CRW_EVENT(2, "Invalid command!\n");
- return -EINVAL;
- case 0x0003:
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- return -EINVAL;
- case 0x0004:
- CIO_CRW_EVENT(2, "Model does not provide ssd\n");
- return -EOPNOTSUPP;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ if (ssd_area->response.code != 0x0001) {
+ CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
ssd_area->response.code);
- return -EIO;
+ ret = -EIO;
+ goto out_free;
}
-
- /*
- * ssd_area->st stores the type of the detected
- * subchannel, with the following definitions:
- *
- * 0: I/O subchannel: All fields have meaning
- * 1: CHSC subchannel: Only sch_val, st and sch
- * have meaning
- * 2: Message subchannel: All fields except unit_addr
- * have meaning
- * 3: ADM subchannel: Only sch_val, st and sch
- * have meaning
- *
- * Other types are currently undefined.
- */
- if (ssd_area->st > 3) { /* uhm, that looks strange... */
- CIO_CRW_EVENT(0, "Strange subchannel type %d"
- " for sch 0.%x.%04x\n", ssd_area->st,
- sch->schid.ssid, sch->schid.sch_no);
- /*
- * There may have been a new subchannel type defined in the
- * time since this code was written; since we don't know which
- * fields have meaning and what to do with it we just jump out
- */
- return 0;
- } else {
- const char *type[4] = {"I/O", "chsc", "message", "ADM"};
- CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
- sch->schid.ssid, sch->schid.sch_no,
- type[ssd_area->st]);
-
- sch->ssd_info.valid = 1;
- sch->ssd_info.type = ssd_area->st;
+ if (!ssd_area->sch_valid) {
+ ret = -ENODEV;
+ goto out_free;
}
-
- if (ssd_area->st == 0 || ssd_area->st == 2) {
- for (j = 0; j < 8; j++) {
- if (!((0x80 >> j) & ssd_area->path_mask &
- ssd_area->fla_valid_mask))
- continue;
- sch->ssd_info.chpid[j] = ssd_area->chpid[j];
- sch->ssd_info.fla[j] = ssd_area->fla[j];
+ /* Copy data */
+ ret = 0;
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ if ((ssd_area->st != 0) && (ssd_area->st != 2))
+ goto out_free;
+ ssd->path_mask = ssd_area->path_mask;
+ ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd_area->path_mask & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = ssd_area->chpid[i];
}
+ if (ssd_area->fla_valid_mask & mask)
+ ssd->fla[i] = ssd_area->fla[i];
}
- return 0;
+out_free:
+ free_page(page);
+ return ret;
}
-int
-css_get_ssd_info(struct subchannel *sch)
+static int check_for_io_on_path(struct subchannel *sch, int mask)
{
- int ret;
- void *page;
+ int cc;
- page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- spin_lock_irq(sch->lock);
- ret = chsc_get_sch_desc_irq(sch, page);
- if (ret) {
- static int cio_chsc_err_msg;
-
- if (!cio_chsc_err_msg) {
- printk(KERN_ERR
- "chsc_get_sch_descriptions:"
- " Error %d while doing chsc; "
- "processing some machine checks may "
- "not work\n", ret);
- cio_chsc_err_msg = 1;
- }
- }
- spin_unlock_irq(sch->lock);
- free_page((unsigned long)page);
- if (!ret) {
- int j, chpid, mask;
- /* Allocate channel path structures, if needed. */
- for (j = 0; j < 8; j++) {
- mask = 0x80 >> j;
- chpid = sch->ssd_info.chpid[j];
- if ((sch->schib.pmcw.pim & mask) &&
- (get_chp_status(chpid) < 0))
- new_channel_path(chpid);
- }
+ cc = stsch(sch->schid, &sch->schib);
+ if (cc)
+ return 0;
+ if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+ return 1;
+ return 0;
+}
+
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0)
+ css_schedule_eval(sch->schid);
+ return;
}
- return ret;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
}
static int
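
The helpers introduced above (check_for_io_on_path() and, further down, get_res_chpid_mask()) rely on the usual s390 convention that path i of a subchannel is represented by the single bit 0x80 >> i, with the most significant bit standing for path 0. A small standalone illustration of that convention, assuming path_mask is an 8-bit PIM-style mask:

	#include <stdio.h>

	int main(void)
	{
		unsigned char path_mask = 0xc1;	/* paths 0, 1 and 7 available */
		int i;

		for (i = 0; i < 8; i++) {
			int mask = 0x80 >> i;	/* single-bit mask for path i */

			if (path_mask & mask)
				printf("path %d available (mask 0x%02x)\n", i, mask);
		}
		return 0;
	}
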
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
int j;
int mask;
struct subchannel *sch;
- struct channel_path *chpid;
+ struct chp_id *chpid;
struct schib schib;
sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
if (sch->schib.pmcw.pim == 0x80)
goto out_unreg;
- if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
- (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
- (sch->schib.pmcw.lpum == mask)) {
- int cc;
-
- cc = cio_clear(sch);
- if (cc == -ENODEV)
+ if (check_for_io_on_path(sch, mask)) {
+ if (device_is_online(sch))
+ device_kill_io(sch);
+ else {
+ terminate_internal_io(sch);
+ /* Re-start path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+ }
+ } else {
+ /* trigger path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+ else if (sch->lpm == mask)
goto out_unreg;
- /* Request retry of internal operation. */
- device_set_intretry(sch);
- /* Call handler. */
- if (sch->driver && sch->driver->termination)
- sch->driver->termination(&sch->dev);
- goto out_unlock;
}
- /* trigger path verification. */
- if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- else if (sch->lpm == mask)
- goto out_unreg;
-out_unlock:
spin_unlock_irq(sch->lock);
return 0;
+
out_unreg:
- spin_unlock_irq(sch->lock);
sch->lpm = 0;
- if (css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
+ spin_unlock_irq(sch->lock);
+ css_schedule_eval(sch->schid);
return 0;
}
-static void
-s390_set_chpid_offline( __u8 chpid)
+void chsc_chp_offline(struct chp_id chpid)
{
char dbf_txt[15];
- struct device *dev;
- sprintf(dbf_txt, "chpr%x", chpid);
+ sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
- if (get_chp_status(chpid) <= 0)
+ if (chp_get_status(chpid) <= 0)
return;
- dev = get_device(&css[0]->chps[chpid]->dev);
- bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
+ bus_for_each_dev(&css_bus_type, NULL, &chpid,
s390_subchannel_remove_chpid);
-
- if (need_rescan || css_slow_subchannels_exist())
- queue_work(slow_path_wq, &slow_path_work);
- put_device(dev);
-}
-
-struct res_acc_data {
- struct channel_path *chp;
- u32 fla_mask;
- u16 fla;
-};
-
-static int
-s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
-{
- int found;
- int chp;
- int ccode;
-
- found = 0;
- for (chp = 0; chp <= 7; chp++)
- /*
- * check if chpid is in information updated by ssd
- */
- if (sch->ssd_info.valid &&
- sch->ssd_info.chpid[chp] == res_data->chp->id &&
- (sch->ssd_info.fla[chp] & res_data->fla_mask)
- == res_data->fla) {
- found = 1;
- break;
- }
-
- if (found == 0)
- return 0;
-
- /*
- * Do a stsch to update our subchannel structure with the
- * new path information and eventually check for logically
- * offline chpids.
- */
- ccode = stsch(sch->schid, &sch->schib);
- if (ccode > 0)
- return 0;
-
- return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
struct schib schib;
- int ret;
/*
* We don't know the device yet, but since a path
* may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
*/
if (stsch_err(schid, &schib))
/* We're through */
- return need_rescan ? -EAGAIN : -ENXIO;
+ return -ENXIO;
/* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(schid);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
+ css_schedule_eval(schid);
+ return 0;
+}
+
+struct res_acc_data {
+ struct chp_id chpid;
+ u32 fla_mask;
+ u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+ struct res_acc_data *data)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (!(ssd->path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+ continue;
+ if ((ssd->fla_valid_mask & mask) &&
+ ((ssd->fla[i] & data->fla_mask) != data->fla))
+ continue;
+ return mask;
}
return 0;
}
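
get_res_chpid_mask() above filters subchannels by link address: when the event carried a full link address all 16 bits must match (fla_mask 0xffff, as set in chsc_process_sei_res_acc() later in this patch), otherwise only the link-address byte is compared (fla_mask 0xff00). A standalone sketch of that masked comparison; fla_matches() and the sample values are invented for illustration:

	#include <stdio.h>

	static int fla_matches(unsigned int sch_fla, unsigned int event_fla,
			       unsigned int fla_mask)
	{
		/* Mirrors the check in get_res_chpid_mask(): masked compare. */
		return (sch_fla & fla_mask) == event_fla;
	}

	int main(void)
	{
		/* Event reported only a link address: high byte 0x47, mask 0xff00. */
		unsigned int event_fla = 0x4700;
		unsigned int fla_mask = 0xff00;

		printf("fla 0x4711 matches: %d\n",
		       fla_matches(0x4711, event_fla, fla_mask));
		printf("fla 0x4800 matches: %d\n",
		       fla_matches(0x4800, event_fla, fla_mask));
		return 0;
	}
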
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
return s390_process_res_acc_new_sch(schid);
spin_lock_irq(sch->lock);
-
- chp_mask = s390_process_res_acc_sch(res_data, sch);
-
- if (chp_mask == 0) {
- spin_unlock_irq(sch->lock);
- put_device(&sch->dev);
- return 0;
- }
+ chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+ if (chp_mask == 0)
+ goto out;
+ if (stsch(sch->schid, &sch->schib))
+ goto out;
old_lpm = sch->lpm;
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
-
+out:
spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
-
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
{
- int rc;
char dbf_txt[15];
- sprintf(dbf_txt, "accpr%x", res_data->chp->id);
+ sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
+ res_data->chpid.id);
CIO_TRACE_EVENT( 2, dbf_txt);
if (res_data->fla != 0) {
sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
* The more information we have (info), the less scanning
* will we have to do.
*/
- rc = for_each_subchannel(__s390_process_res_acc, res_data);
- if (css_slow_subchannels_exist())
- rc = -EAGAIN;
- else if (rc != -EAGAIN)
- rc = 0;
- return rc;
+ for_each_subchannel(__s390_process_res_acc, res_data);
}
static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
- int chpid;
+ struct chp_id chpid;
+ int id;
CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
- return 0;
- chpid = __get_chpid_from_lir(sei_area->ccdf);
- if (chpid < 0)
+ return;
+ id = __get_chpid_from_lir(sei_area->ccdf);
+ if (id < 0)
CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
- else
- s390_set_chpid_offline(chpid);
-
- return 0;
+ else {
+ chp_id_init(&chpid);
+ chpid.id = id;
+ chsc_chp_offline(chpid);
+ }
}
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
struct res_acc_data res_data;
- struct device *dev;
+ struct chp_id chpid;
int status;
- int rc;
CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
"rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
- return 0;
+ return;
+ chp_id_init(&chpid);
+ chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
- status = get_chp_status(sei_area->rsid);
+ status = chp_get_status(chpid);
if (status < 0)
- new_channel_path(sei_area->rsid);
+ chp_new(chpid);
else if (!status)
- return 0;
- dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+ return;
memset(&res_data, 0, sizeof(struct res_acc_data));
- res_data.chp = to_channelpath(dev);
+ res_data.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
res_data.fla = sei_area->fla;
if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
/* link address */
res_data.fla_mask = 0xff00;
}
- rc = s390_process_res_acc(&res_data);
- put_device(dev);
-
- return rc;
+ s390_process_res_acc(&res_data);
}
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+struct chp_config_data {
+ u8 map[32];
+ u8 op;
+ u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
- int rc;
+ struct chp_config_data *data;
+ struct chp_id chpid;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+ if (sei_area->rs != 0)
+ return;
+ data = (struct chp_config_data *) &(sei_area->ccdf);
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data->map, num))
+ continue;
+ chpid.id = num;
+ printk(KERN_WARNING "cio: processing configure event %d for "
+ "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+ switch (data->op) {
+ case 0:
+ chp_cfg_schedule(chpid, 1);
+ break;
+ case 1:
+ chp_cfg_schedule(chpid, 0);
+ break;
+ case 2:
+ chp_cfg_cancel_deconfigure(chpid);
+ break;
+ }
+ }
+}
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
/* Check if we might have lost some information. */
- if (sei_area->flags & 0x40)
+ if (sei_area->flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
/* which kind of information was stored? */
- rc = 0;
switch (sei_area->cc) {
case 1: /* link incident*/
- rc = chsc_process_sei_link_incident(sei_area);
+ chsc_process_sei_link_incident(sei_area);
break;
case 2: /* i/o resource accessibility */
- rc = chsc_process_sei_res_acc(sei_area);
+ chsc_process_sei_res_acc(sei_area);
+ break;
+ case 8: /* channel-path-configuration notification */
+ chsc_process_sei_chp_config(sei_area);
break;
default: /* other stuff */
CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
sei_area->cc);
break;
}
-
- return rc;
}
-int chsc_process_crw(void)
+void chsc_process_crw(void)
{
struct chsc_sei_area *sei_area;
- int ret;
- int rc;
if (!sei_page)
- return 0;
+ return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
sei_area = sei_page;
CIO_TRACE_EVENT( 2, "prcss");
- ret = 0;
do {
memset(sei_area, 0, sizeof(*sei_area));
sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
if (sei_area->response.code == 0x0001) {
CIO_CRW_EVENT(4, "chsc: sei successful\n");
- rc = chsc_process_sei(sei_area);
- if (rc)
- ret = rc;
+ chsc_process_sei(sei_area);
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei_area->response.code);
- ret = 0;
break;
}
} while (sei_area->flags & 0x80);
-
- return ret;
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
struct schib schib;
- int ret;
if (stsch_err(schid, &schib))
/* We're through */
- return need_rescan ? -EAGAIN : -ENXIO;
+ return -ENXIO;
/* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(schid);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
- }
+ css_schedule_eval(schid);
return 0;
}
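
chsc_process_crw() above drains the store-event-information facility: it reissues the CHSC request and handles one event per response for as long as the response sets the more-events-pending flag (bit 0x80 in sei_area->flags). A simplified standalone sketch of that loop; struct fake_sei and fake_chsc_sei() are invented stand-ins, not kernel interfaces:

	#include <stdio.h>

	struct fake_sei {			/* stand-in for struct chsc_sei_area */
		unsigned char flags;		/* bit 0x80: more events pending */
		unsigned char cc;		/* content code of the stored event */
	};

	static int fake_chsc_sei(struct fake_sei *sei)
	{
		static int queued = 3;		/* pretend three events are queued */

		sei->cc = 2;			/* e.g. resource accessibility */
		sei->flags = --queued > 0 ? 0x80 : 0;
		return 0;			/* response ok */
	}

	int main(void)
	{
		struct fake_sei sei;

		do {
			if (fake_chsc_sei(&sei))
				break;
			printf("handling sei event, cc=%d\n", sei.cc);
		} while (sei.flags & 0x80);	/* drain until nothing is pending */
		return 0;
	}
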
@@ -619,10 +518,10 @@ static int
__chp_add(struct subchannel_id schid, void *data)
{
int i, mask;
- struct channel_path *chp;
+ struct chp_id *chpid;
struct subchannel *sch;
- chp = data;
+ chpid = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
for (i=0; i<8; i++) {
mask = 0x80 >> i;
if ((sch->schib.pmcw.pim & mask) &&
- (sch->schib.pmcw.chpid[i] == chp->id)) {
+ (sch->schib.pmcw.chpid[i] == chpid->id)) {
if (stsch(sch->schid, &sch->schib) != 0) {
/* Endgame. */
spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
return 0;
}
-static int
-chp_add(int chpid)
+void chsc_chp_online(struct chp_id chpid)
{
- int rc;
char dbf_txt[15];
- struct device *dev;
- if (!get_chp_status(chpid))
- return 0; /* no need to do the rest */
-
- sprintf(dbf_txt, "cadd%x", chpid);
+ sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
- dev = get_device(&css[0]->chps[chpid]->dev);
- rc = for_each_subchannel(__chp_add, to_channelpath(dev));
- if (css_slow_subchannels_exist())
- rc = -EAGAIN;
- if (rc != -EAGAIN)
- rc = 0;
- put_device(dev);
- return rc;
+ if (chp_get_status(chpid) != 0)
+ for_each_subchannel(__chp_add, &chpid);
}
-/*
- * Handling of crw machine checks with channel path source.
- */
-int
-chp_process_crw(int chpid, int on)
-{
- if (on == 0) {
- /* Path has gone. We use the link incident routine.*/
- s390_set_chpid_offline(chpid);
- return 0; /* De-register is async anyway. */
- }
- /*
- * Path has come. Allocate a new channel path structure,
- * if needed.
- */
- if (get_chp_status(chpid) < 0)
- new_channel_path(chpid);
- /* Avoid the extra overhead in process_rec_acc. */
- return chp_add(chpid);
-}
-
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
- int cc;
-
- cc = stsch(sch->schid, &sch->schib);
- if (cc)
- return 0;
- if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
- return 1;
- return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
- if (cio_clear(sch)) {
- /* Recheck device in case clear failed. */
- sch->lpm = 0;
- if (device_trigger_verify(sch) != 0) {
- if(css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- }
- return;
- }
- /* Request retry of internal operation. */
- device_set_intretry(sch);
- /* Call handler. */
- if (sch->driver && sch->driver->termination)
- sch->driver->termination(&sch->dev);
-}
-
-static void
-__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ struct chp_id chpid, int on)
{
int chp, old_lpm;
+ int mask;
unsigned long flags;
- if (!sch->ssd_info.valid)
- return;
-
spin_lock_irqsave(sch->lock, flags);
old_lpm = sch->lpm;
for (chp = 0; chp < 8; chp++) {
- if (sch->ssd_info.chpid[chp] != chpid)
+ mask = 0x80 >> chp;
+ if (!(sch->ssd_info.path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
continue;
if (on) {
- sch->opm |= (0x80 >> chp);
- sch->lpm |= (0x80 >> chp);
+ sch->opm |= mask;
+ sch->lpm |= mask;
if (!old_lpm)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
break;
}
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- if (check_for_io_on_path(sch, chp)) {
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (check_for_io_on_path(sch, mask)) {
if (device_is_online(sch))
/* Path verification is done after killing. */
device_kill_io(sch);
- else
+ else {
/* Kill and retry internal I/O. */
terminate_internal_io(sch);
- } else if (!sch->lpm) {
- if (device_trigger_verify(sch) != 0) {
- if (css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
+ /* Re-start path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
}
+ } else if (!sch->lpm) {
+ if (device_trigger_verify(sch) != 0)
+ css_schedule_eval(sch->schid);
} else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
spin_unlock_irqrestore(sch->lock, flags);
}
-static int
-s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
struct subchannel *sch;
- __u8 *chpid;
+ struct chp_id *chpid;
sch = to_subchannel(dev);
chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
return 0;
}
-static int
-s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
struct subchannel *sch;
- __u8 *chpid;
+ struct chp_id *chpid;
sch = to_subchannel(dev);
chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
- if (css_enqueue_subchannel_slow(schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
- }
+ css_schedule_eval(schid);
return 0;
}
-/*
- * Function: s390_vary_chpid
- * Varies the specified chpid online or offline
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
*/
-static int
-s390_vary_chpid( __u8 chpid, int on)
+int chsc_chp_vary(struct chp_id chpid, int on)
{
- char dbf_text[15];
- int status;
-
- sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
- CIO_TRACE_EVENT( 2, dbf_text);
-
- status = get_chp_status(chpid);
- if (status < 0) {
- printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
- return -EINVAL;
- }
-
- if (!on && !status) {
- printk(KERN_ERR "chpid %x is already offline\n", chpid);
- return -EINVAL;
- }
-
- set_chp_logically_online(chpid, on);
-
/*
* Redo PathVerification on the devices the chpid connects to
*/
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
if (on)
/* Scan for new devices on varied on path. */
for_each_subchannel(__s390_vary_chpid_on, NULL);
- if (need_rescan || css_slow_subchannels_exist())
- queue_work(slow_path_wq, &slow_path_work);
return 0;
}
-/*
- * Channel measurement related functions
- */
-static ssize_t
-chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
-{
- struct channel_path *chp;
- unsigned int size;
-
- chp = to_channelpath(container_of(kobj, struct device, kobj));
- if (!chp->cmg_chars)
- return 0;
-
- size = sizeof(struct cmg_chars);
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
- memcpy(buf, chp->cmg_chars + off, count);
- return count;
-}
-
-static struct bin_attribute chp_measurement_chars_attr = {
- .attr = {
- .name = "measurement_chars",
- .mode = S_IRUSR,
- .owner = THIS_MODULE,
- },
- .size = sizeof(struct cmg_chars),
- .read = chp_measurement_chars_read,
-};
-
-static void
-chp_measurement_copy_block(struct cmg_entry *buf,
- struct channel_subsystem *css, int chpid)
-{
- void *area;
- struct cmg_entry *entry, reference_buf;
- int idx;
-
- if (chpid < 128) {
- area = css->cub_addr1;
- idx = chpid;
- } else {
- area = css->cub_addr2;
- idx = chpid - 128;
- }
- entry = area + (idx * sizeof(struct cmg_entry));
- do {
- memcpy(buf, entry, sizeof(*entry));
- memcpy(&reference_buf, entry, sizeof(*entry));
- } while (reference_buf.values[0] != buf->values[0]);
-}
-
-static ssize_t
-chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
- struct channel_path *chp;
- struct channel_subsystem *css;
- unsigned int size;
-
- chp = to_channelpath(container_of(kobj, struct device, kobj));
- css = to_css(chp->dev.parent);
-
- size = sizeof(struct cmg_entry);
-
- /* Only allow single reads. */
- if (off || count < size)
- return 0;
- chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
- count = size;
- return count;
-}
-
-static struct bin_attribute chp_measurement_attr = {
- .attr = {
- .name = "measurement",
- .mode = S_IRUSR,
- .owner = THIS_MODULE,
- },
- .size = sizeof(struct cmg_entry),
- .read = chp_measurement_read,
-};
-
-static void
-chsc_remove_chp_cmg_attr(struct channel_path *chp)
-{
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- device_remove_bin_file(&chp->dev, &chp_measurement_attr);
-}
-
-static int
-chsc_add_chp_cmg_attr(struct channel_path *chp)
-{
- int ret;
-
- ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
- if (ret)
- return ret;
- ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
- if (ret)
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- return ret;
-}
-
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
- chsc_remove_chp_cmg_attr(css->chps[i]);
+ chp_remove_cmg_attr(css->chps[i]);
}
}
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
- ret = chsc_add_chp_cmg_attr(css->chps[i]);
+ ret = chp_add_cmg_attr(css->chps[i]);
if (ret)
goto cleanup;
}
@@ -1007,12 +708,11 @@ cleanup:
for (--i; i >= 0; i--) {
if (!css->chps[i])
continue;
- chsc_remove_chp_cmg_attr(css->chps[i]);
+ chp_remove_cmg_attr(css->chps[i]);
}
return ret;
}
-
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
} else
chsc_remove_cmg_attr(css);
}
- if (enable && !css->cm_enabled) {
+ if (!css->cm_enabled) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
return ret;
}
-/*
- * Files for the channel path entries.
- */
-static ssize_t
-chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
- if (!chp)
- return 0;
- return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
- sprintf(buf, "offline\n"));
-}
-
-static ssize_t
-chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct channel_path *cp = container_of(dev, struct channel_path, dev);
- char cmd[10];
- int num_args;
- int error;
-
- num_args = sscanf(buf, "%5s", cmd);
- if (!num_args)
- return count;
-
- if (!strnicmp(cmd, "on", 2))
- error = s390_vary_chpid(cp->id, 1);
- else if (!strnicmp(cmd, "off", 3))
- error = s390_vary_chpid(cp->id, 0);
- else
- error = -EINVAL;
-
- return error < 0 ? error : count;
-
-}
-
-static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
-
-static ssize_t
-chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
- if (!chp)
- return 0;
- return sprintf(buf, "%x\n", chp->desc.desc);
-}
-
-static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
-
-static ssize_t
-chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = to_channelpath(dev);
-
- if (!chp)
- return 0;
- if (chp->cmg == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->cmg);
-}
-
-static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
-
-static ssize_t
-chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = to_channelpath(dev);
-
- if (!chp)
- return 0;
- if (chp->shared == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->shared);
-}
-
-static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-
-static struct attribute * chp_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_type.attr,
- &dev_attr_cmg.attr,
- &dev_attr_shared.attr,
- NULL,
-};
-
-static struct attribute_group chp_attr_group = {
- .attrs = chp_attrs,
-};
-
-static void
-chp_release(struct device *dev)
-{
- struct channel_path *cp;
-
- cp = container_of(dev, struct channel_path, dev);
- kfree(cp);
-}
-
-static int
-chsc_determine_channel_path_description(int chpid,
- struct channel_path_desc *desc)
+int chsc_determine_channel_path_description(struct chp_id chpid,
+ struct channel_path_desc *desc)
{
int ccode, ret;
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
- scpd_area->first_chpid = chpid;
- scpd_area->last_chpid = chpid;
+ scpd_area->first_chpid = chpid.id;
+ scpd_area->last_chpid = chpid.id;
ccode = chsc(scpd_area);
if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
}
}
-static int
-chsc_get_channel_measurement_chars(struct channel_path *chp)
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
int ccode, ret;
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
- scmc_area->first_chpid = chp->id;
- scmc_area->last_chpid = chp->id;
+ scmc_area->first_chpid = chp->chpid.id;
+ scmc_area->last_chpid = chp->chpid.id;
ccode = chsc(scmc_area);
if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
return ret;
}
-/*
- * Entries for chpids on the system bus.
- * This replaces /proc/chpids.
- */
-static int
-new_channel_path(int chpid)
-{
- struct channel_path *chp;
- int ret;
-
- chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
- if (!chp)
- return -ENOMEM;
-
- /* fill in status, etc. */
- chp->id = chpid;
- chp->state = 1;
- chp->dev.parent = &css[0]->device;
- chp->dev.release = chp_release;
- snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
-
- /* Obtain channel path description and fill it in. */
- ret = chsc_determine_channel_path_description(chpid, &chp->desc);
- if (ret)
- goto out_free;
- /* Get channel-measurement characteristics. */
- if (css_characteristics_avail && css_chsc_characteristics.scmc
- && css_chsc_characteristics.secm) {
- ret = chsc_get_channel_measurement_chars(chp);
- if (ret)
- goto out_free;
- } else {
- static int msg_done;
-
- if (!msg_done) {
- printk(KERN_WARNING "cio: Channel measurements not "
- "available, continuing.\n");
- msg_done = 1;
- }
- chp->cmg = -1;
- }
-
- /* make it known to the system */
- ret = device_register(&chp->dev);
- if (ret) {
- printk(KERN_WARNING "%s: could not register %02x\n",
- __func__, chpid);
- goto out_free;
- }
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
- goto out_free;
- }
- mutex_lock(&css[0]->mutex);
- if (css[0]->cm_enabled) {
- ret = chsc_add_chp_cmg_attr(chp);
- if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
- device_unregister(&chp->dev);
- mutex_unlock(&css[0]->mutex);
- goto out_free;
- }
- }
- css[0]->chps[chpid] = chp;
- mutex_unlock(&css[0]->mutex);
- return ret;
-out_free:
- kfree(chp);
- return ret;
-}
-
-void *
-chsc_get_chp_desc(struct subchannel *sch, int chp_no)
-{
- struct channel_path *chp;
- struct channel_path_desc *desc;
-
- chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
- if (!chp)
- return NULL;
- desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
- if (!desc)
- return NULL;
- memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
- return desc;
-}
-
static int __init
chsc_alloc_sei_area(void)
{