Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/chp.c        |  22
-rw-r--r--  drivers/s390/cio/chp.h        |   2
-rw-r--r--  drivers/s390/cio/chsc.c       |  11
-rw-r--r--  drivers/s390/cio/cio.c        | 160
-rw-r--r--  drivers/s390/cio/cio.h        |  11
-rw-r--r--  drivers/s390/cio/css.c        | 114
-rw-r--r--  drivers/s390/cio/css.h        |   4
-rw-r--r--  drivers/s390/cio/device.c     | 139
-rw-r--r--  drivers/s390/cio/device.h     |   2
-rw-r--r--  drivers/s390/cio/device_ops.c |  22
-rw-r--r--  drivers/s390/cio/idset.c      |   2
11 files changed, 226 insertions, 263 deletions
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 50ad5fdd815..21fabc6d5a9 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -377,6 +377,26 @@ static void chp_release(struct device *dev)
}
/**
+ * chp_update_desc - update channel-path description
+ * @chp - channel-path
+ *
+ * Update the channel-path description of the specified channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+
+ return rc;
+}
+
+/**
* chp_new - register a new channel-path
* @chpid - channel-path ID
*
@@ -403,7 +423,7 @@ int chp_new(struct chp_id chpid)
mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
- ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
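A minimal user-space sketch of the pattern the new chp_update_desc() helper factors out: fetch the mandatory format-0 description first and fail fast, then fetch the format-1 description and hand its return code straight back to the caller. fetch_fmt0() and fetch_fmt1() are hypothetical stand-ins for the two chsc_determine_*_channel_path_desc() calls; only the control flow mirrors the patch.

#include <stdio.h>

struct desc_fmt0 { unsigned int flags; };
struct desc_fmt1 { unsigned int f:1, r:1, mdc:16; };

struct channel_path_sketch {
	struct desc_fmt0 desc;
	struct desc_fmt1 desc_fmt1;
};

/* Hypothetical stand-ins for the two CHSC calls. */
static int fetch_fmt0(struct desc_fmt0 *d) { d->flags = 0x80; return 0; }
static int fetch_fmt1(struct desc_fmt1 *d) { d->f = 1; d->r = 1; d->mdc = 4; return 0; }

static int chp_update_desc_sketch(struct channel_path_sketch *chp)
{
	int rc = fetch_fmt0(&chp->desc);

	if (rc)				/* format-0 data is required */
		return rc;
	/* The format-1 result code is passed back to the caller unchanged. */
	return fetch_fmt1(&chp->desc_fmt1);
}

int main(void)
{
	struct channel_path_sketch chp = { { 0 }, { 0, 0, 0 } };

	printf("rc=%d flags=%#x mdc=%u\n",
	       chp_update_desc_sketch(&chp), chp.desc.flags,
	       (unsigned int)chp.desc_fmt1.mdc);
	return 0;
}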
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index e1399dbee83..9284b785a06 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -44,6 +44,7 @@ struct channel_path {
struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
@@ -62,6 +63,7 @@ int chp_is_registered(struct chp_id chpid);
void *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e16c553f655..8ea7d9b2c67 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -376,7 +376,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
continue;
}
mutex_lock(&chp->lock);
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ chp_update_desc(chp);
mutex_unlock(&chp->lock);
}
}
@@ -631,8 +631,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
* Redo PathVerification on the devices the chpid connects to
*/
if (on) {
- /* Try to update the channel path descritor. */
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &chpid);
} else
@@ -825,9 +825,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
{
struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
scpd_area = chsc_page;
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
@@ -835,7 +836,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
chsc_resp = (void *)&scpd_area->response;
memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
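The switch from spin_lock_irq() to spin_lock_irqsave() matters once the function can be reached with interrupts already disabled: the _irq variant would unconditionally re-enable them on unlock. The toy user-space model below shows why the saved flags are needed; the local_irq_* helpers are hand-written re-implementations for illustration, not the kernel macros.

#include <stdio.h>

static int irqs_enabled = 1;			/* toy model of the CPU interrupt flag */

static void local_irq_save(int *flags)	 { *flags = irqs_enabled; irqs_enabled = 0; }
static void local_irq_restore(int flags) { irqs_enabled = flags; }
static void local_irq_disable(void)	 { irqs_enabled = 0; }
static void local_irq_enable(void)	 { irqs_enabled = 1; }

/* _irq variant: blindly re-enables interrupts on exit. */
static void critical_section_irq(void)
{
	local_irq_disable();
	/* ... critical section ... */
	local_irq_enable();
}

/* _irqsave variant: restores whatever state the caller had. */
static void critical_section_irqsave(void)
{
	int flags;

	local_irq_save(&flags);
	/* ... critical section ... */
	local_irq_restore(flags);
}

int main(void)
{
	local_irq_disable();			/* caller already runs with IRQs off */
	critical_section_irq();
	printf("after _irq:     irqs_enabled=%d (wrongly re-enabled)\n", irqs_enabled);

	local_irq_disable();
	critical_section_irqsave();
	printf("after _irqsave: irqs_enabled=%d (caller state preserved)\n", irqs_enabled);
	return 0;
}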
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 986ef6a92a4..935d80b4e9c 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -471,15 +471,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-int cio_create_sch_lock(struct subchannel *sch)
-{
- sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (!sch->lock)
- return -ENOMEM;
- spin_lock_init(sch->lock);
- return 0;
-}
-
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
@@ -536,32 +527,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
- /* Nuke all fields. */
- memset(sch, 0, sizeof(struct subchannel));
-
- sch->schid = schid;
- if (cio_is_console(schid)) {
- sch->lock = cio_get_console_lock();
- } else {
- err = cio_create_sch_lock(sch);
- if (err)
- goto out;
- }
- mutex_init(&sch->reg_mutex);
-
/*
* The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
+ * indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
- * is not valid.
+ * is not valid.
*/
- ccode = stsch_err (schid, &sch->schib);
+ ccode = stsch_err(schid, &sch->schib);
if (ccode) {
err = (ccode == 3) ? -ENXIO : ccode;
goto out;
}
- /* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
+ sch->schid = schid;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
@@ -578,11 +556,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
- return 0;
out:
- if (!cio_is_console(schid))
- kfree(sch->lock);
- sch->lock = NULL;
return err;
}
@@ -650,15 +624,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
}
#ifdef CONFIG_CCW_CONSOLE
-static struct subchannel console_subchannel;
-static struct io_subchannel_private console_priv;
-static int console_subchannel_in_use;
+static struct subchannel *console_sch;
/*
* Use cio_tsch to update the subchannel status and call the interrupt handler
- * if status had been pending. Called with the console_subchannel lock.
+ * if status had been pending. Called with the subchannel's lock held.
*/
-static void cio_tsch(struct subchannel *sch)
+void cio_tsch(struct subchannel *sch)
{
struct irb *irb;
int irq_context;
@@ -675,6 +647,7 @@ static void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
+ kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
@@ -685,135 +658,90 @@ static void cio_tsch(struct subchannel *sch)
}
}
-void *cio_get_console_priv(void)
-{
- return &console_priv;
-}
-
-/*
- * busy wait for the next interrupt on the console
- */
-void wait_cons_dev(void)
+static int cio_test_for_console(struct subchannel_id schid, void *data)
{
- if (!console_subchannel_in_use)
- return;
-
- while (1) {
- cio_tsch(&console_subchannel);
- if (console_subchannel.schib.scsw.cmd.actl == 0)
- break;
- udelay_simple(100);
- }
-}
+ struct schib schib;
-static int
-cio_test_for_console(struct subchannel_id schid, void *data)
-{
- if (stsch_err(schid, &console_subchannel.schib) != 0)
+ if (stsch_err(schid, &schib) != 0)
return -ENXIO;
- if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
- console_subchannel.schib.pmcw.dnv &&
- (console_subchannel.schib.pmcw.dev == console_devno)) {
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
-
-static int
-cio_get_console_sch_no(void)
+static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
-
+ struct schib schib;
+
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch_err(schid, &console_subchannel.schib) != 0 ||
- (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
- !console_subchannel.schib.pmcw.dnv)
+ if (stsch_err(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
- console_devno = console_subchannel.schib.pmcw.dev;
+ console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
- if (console_irq == -1)
- return -1;
- } else {
- /* unlike in 2.4, we cannot autoprobe here, since
- * the channel subsystem is not fully initialized.
- * With some luck, the HWC console can take over */
- return -1;
}
return console_irq;
}
-struct subchannel *
-cio_probe_console(void)
+struct subchannel *cio_probe_console(void)
{
- int sch_no, ret;
struct subchannel_id schid;
+ struct subchannel *sch;
+ int sch_no, ret;
- if (xchg(&console_subchannel_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
- console_subchannel_in_use = 0;
pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
- memset(&console_subchannel, 0, sizeof(struct subchannel));
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- ret = cio_validate_subchannel(&console_subchannel, schid);
- if (ret) {
- console_subchannel_in_use = 0;
- return ERR_PTR(-ENODEV);
- }
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return sch;
- /*
- * enable console I/O-interrupt subclass
- */
isc_register(CONSOLE_ISC);
- console_subchannel.config.isc = CONSOLE_ISC;
- console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
- ret = cio_commit_config(&console_subchannel);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ put_device(&sch->dev);
return ERR_PTR(ret);
}
- return &console_subchannel;
-}
-
-void
-cio_release_console(void)
-{
- console_subchannel.config.intparm = 0;
- cio_commit_config(&console_subchannel);
- isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ console_sch = sch;
+ return sch;
}
-/* Bah... hack to catch console special sausages. */
-int
-cio_is_console(struct subchannel_id schid)
+int cio_is_console(struct subchannel_id schid)
{
- if (!console_subchannel_in_use)
+ if (!console_sch)
return 0;
- return schid_equal(&schid, &console_subchannel.schid);
+ return schid_equal(&schid, &console_sch->schid);
}
-struct subchannel *
-cio_get_console_subchannel(void)
+void cio_register_early_subchannels(void)
{
- if (!console_subchannel_in_use)
- return NULL;
- return &console_subchannel;
+ int ret;
+
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
}
+#endif /* CONFIG_CCW_CONSOLE */
-#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
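The reworked cio_get_console_sch_no() keeps the same two-step lookup: trust a hypervisor-provided subchannel number if it validates, otherwise scan all subchannels for the known console device number. Below is a user-space sketch of just that decision flow; the subchannel table and the stsch_sketch() lookup are invented for illustration and stand in for stsch_err()/for_each_subchannel().

#include <stdio.h>

struct sch_info { int sch_no; int devno; int is_io; int dnv; };

/* Invented stand-in for the data stsch() would return per subchannel. */
static const struct sch_info subchannels[] = {
	{ 0x0000, 0xf500, 1, 1 },
	{ 0x0001, 0xf5f0, 1, 1 },	/* our console device */
	{ 0x0002, 0xffff, 0, 0 },
};

static int console_irq = -1;		/* subchannel number, if the hypervisor told us */
static int console_devno = 0xf5f0;	/* device number from the command line */

static const struct sch_info *stsch_sketch(int sch_no)
{
	unsigned int i;

	for (i = 0; i < sizeof(subchannels) / sizeof(subchannels[0]); i++)
		if (subchannels[i].sch_no == sch_no)
			return &subchannels[i];
	return NULL;
}

static int get_console_sch_no_sketch(void)
{
	const struct sch_info *s;
	unsigned int i;

	if (console_irq != -1) {
		/* Validate the subchannel number we were given. */
		s = stsch_sketch(console_irq);
		if (!s || !s->is_io || !s->dnv)
			return -1;
		console_devno = s->devno;
	} else if (console_devno != -1) {
		/* Scan for the subchannel that owns the console device. */
		for (i = 0; i < sizeof(subchannels) / sizeof(subchannels[0]); i++)
			if (subchannels[i].is_io && subchannels[i].dnv &&
			    subchannels[i].devno == console_devno)
				console_irq = subchannels[i].sch_no;
	}
	return console_irq;			/* still -1 if nothing was found */
}

int main(void)
{
	printf("console subchannel: %d\n", get_console_sch_no_sketch());
	return 0;
}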
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4a1ff5c2eb8..d62f5e7f3cf 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -121,23 +121,18 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
-int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
-extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
-extern struct subchannel *cio_get_console_subchannel(void);
-extern spinlock_t * cio_get_console_lock(void);
-extern void *cio_get_console_priv(void);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
-#define cio_get_console_subchannel() NULL
-#define cio_get_console_lock() NULL
-#define cio_get_console_priv() NULL
+static inline void cio_register_early_subchannels(void) {}
#endif
#endif
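Replacing the #define stubs with a static inline no-op follows the usual convention: an inline stub keeps the call site type-checked even when the option is compiled out. A small portable illustration of the pattern, with HAVE_CONSOLE as an invented config switch:

#include <stdio.h>

/* #define HAVE_CONSOLE 1 */	/* flip on to use the real implementation */

#ifdef HAVE_CONSOLE
void register_early_subchannels(void);	/* real version lives elsewhere */
#else
/* Type-checked no-op: callers compile unchanged when the feature is off. */
static inline void register_early_subchannels(void) {}
#endif

int main(void)
{
	register_early_subchannels();
	printf("done\n");
	return 0;
}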
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a239237d43f..1ebe5d3ddeb 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -137,37 +137,53 @@ out:
static void css_sch_todo(struct work_struct *work);
-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static int css_sch_create_locks(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->lock);
+ kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
- sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
- if (sch == NULL)
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, schid);
- if (ret < 0) {
- kfree(sch);
- return ERR_PTR(ret);
- }
+
+ ret = cio_validate_subchannel(sch, schid);
+ if (ret < 0)
+ goto err;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
return sch;
-}
-
-static void
-css_subchannel_release(struct device *dev)
-{
- struct subchannel *sch;
- sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid)) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
@@ -177,7 +193,7 @@ static int css_sch_device_register(struct subchannel *sch)
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
- ret = device_register(&sch->dev);
+ ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
@@ -228,16 +244,11 @@ void css_update_ssd_info(struct subchannel *sch)
{
int ret;
- if (cio_is_console(sch->schid)) {
- /* Console is initialized too early for functions requiring
- * memory allocation. */
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- } else {
- ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
- if (ret)
- ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- ssd_register_chpids(&sch->ssd_info);
- }
+
+ ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -275,14 +286,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
NULL,
};
-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
- sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
@@ -314,23 +324,19 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
{
- int ret;
struct subchannel *sch;
+ int ret;
+
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
- if (cio_is_console(schid))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- return PTR_ERR(sch);
- }
ret = css_register_subchannel(sch);
- if (ret) {
- if (!cio_is_console(schid))
- put_device(&sch->dev);
- }
+ if (ret)
+ put_device(&sch->dev);
+
return ret;
}
@@ -770,7 +776,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
- ret = cio_create_sch_lock(css->pseudo_subchannel);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
@@ -870,8 +876,7 @@ static struct notifier_block css_power_notifier = {
/*
* Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel's are created during probing.
*/
static int __init css_bus_init(void)
{
@@ -1050,6 +1055,8 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
css_complete_work();
@@ -1065,9 +1072,8 @@ void channel_subsystem_reinit(void)
chsc_enable_facility(CHSC_SDA_OC_MSS);
chp_id_for_each(&chpid) {
chp = chpid_to_chp(chpid);
- if (!chp)
- continue;
- chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ if (chp)
+ chp_update_desc(chp);
}
}
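Because css_alloc_subchannel() now calls device_initialize(), every allocated subchannel is reference-counted from the start, a single put_device() covers every error path, and css_subchannel_release() does the actual freeing. The following user-space sketch shows that get/put/release discipline with hand-written refcount helpers; it is an analogy, not the driver-core API.

#include <stdio.h>
#include <stdlib.h>

struct sch {
	int refcount;
	void (*release)(struct sch *);
	int sch_no;
};

static void sch_release(struct sch *s)
{
	printf("releasing subchannel %d\n", s->sch_no);
	free(s);
}

static void put_sch(struct sch *s)
{
	if (--s->refcount == 0)
		s->release(s);		/* last reference gone: release callback frees */
}

static struct sch *alloc_sch(int sch_no)
{
	struct sch *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->sch_no = sch_no;
	s->release = sch_release;
	s->refcount = 1;		/* like device_initialize(): caller owns one reference */
	return s;
}

static int register_sch(struct sch *s)
{
	return s->sch_no < 0 ? -1 : 0;	/* pretend registration can fail */
}

int main(void)
{
	struct sch *s = alloc_sch(-1);

	if (!s)
		return 1;
	if (register_sch(s))
		put_sch(s);	/* error path: one put undoes the allocation */
	/* on success the registry would now own the reference */
	return 0;
}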
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4af3dfe70ef..b1de6033523 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
-extern int css_probe_device(struct subchannel_id);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
@@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index c6767f5a58b..1ab5f6c36d9 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -19,6 +19,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
@@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
@@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-static struct bus_type ccw_bus_type;
-
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
@@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(unsigned long data);
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -191,10 +192,7 @@ int __init io_subchannel_init(void)
{
int ret;
- init_waitqueue_head(&ccw_device_init_wq);
- atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
-
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
@@ -1086,19 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
- cdev->dev.groups = ccwdev_attr_groups;
- device_initialize(&cdev->dev);
- cdev->private->flags.initialized = 1;
- ccw_device_register(cdev);
- /*
- * Check if the device is already online. If it is
- * the reference count needs to be corrected since we
- * didn't obtain a reference in ccw_device_set_online.
- */
- if (cdev->private->state != DEV_STATE_NOT_OPER &&
- cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_BOXED)
- get_device(&cdev->dev);
+ rc = ccw_device_register(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
@@ -1580,88 +1573,102 @@ out:
}
#ifdef CONFIG_CCW_CONSOLE
-static struct ccw_device console_cdev;
-static struct ccw_device_private console_private;
-static int console_cdev_in_use;
-
-static DEFINE_SPINLOCK(ccw_console_lock);
-
-spinlock_t * cio_get_console_lock(void)
-{
- return &ccw_console_lock;
-}
-
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
- struct io_subchannel_private *io_priv = cio_get_console_priv();
int rc;
- /* Attach subchannel private data. */
- memset(io_priv, 0, sizeof(*io_priv));
- set_io_private(sch, io_priv);
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
- /* Initialize the ccw_device structure. */
- cdev->dev.parent= &sch->dev;
sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- rc = -EIO;
- if (cdev->private->state != DEV_STATE_OFFLINE)
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
goto out_unlock;
- ccw_device_online(cdev);
+
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- if (cdev->private->state != DEV_STATE_ONLINE)
- goto out_unlock;
- rc = 0;
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return rc;
}
-struct ccw_device *
-ccw_device_probe_console(void)
+struct ccw_device *ccw_device_probe_console(void)
{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- if (xchg(&console_cdev_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch = cio_probe_console();
- if (IS_ERR(sch)) {
- console_cdev_in_use = 0;
- return (void *) sch;
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv) {
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
- memset(&console_cdev, 0, sizeof(struct ccw_device));
- memset(&console_private, 0, sizeof(struct ccw_device_private));
- console_cdev.private = &console_private;
- console_private.cdev = &console_cdev;
- console_private.int_class = IRQIO_CIO;
- ret = ccw_device_console_enable(&console_cdev, sch);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+ }
+ set_io_private(sch, io_priv);
+ ret = ccw_device_console_enable(cdev, sch);
if (ret) {
- cio_release_console();
- console_cdev_in_use = 0;
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
return ERR_PTR(ret);
}
- console_cdev.online = 1;
- return &console_cdev;
+ return cdev;
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
+ }
}
static int ccw_device_pm_restore(struct device *dev);
-int ccw_device_force_console(void)
+int ccw_device_force_console(struct ccw_device *cdev)
{
- if (!console_cdev_in_use)
- return -ENODEV;
- return ccw_device_pm_restore(&console_cdev.dev);
+ return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
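ccw_device_wait_idle() polls the subchannel until its activity-control bits clear. The small user-space analogue below shows the poll-until-idle loop; poll_status() stands in for cio_tsch() and the delay comment marks where udelay_simple() would sit in the kernel version.

#include <stdio.h>

static int pending_work = 3;	/* pretend three status updates are outstanding */

/* Stand-in for cio_tsch(): process one pending piece of status. */
static unsigned int poll_status(void)
{
	if (pending_work > 0)
		pending_work--;
	return pending_work;	/* analogue of scsw.cmd.actl */
}

static void wait_idle_sketch(void)
{
	while (1) {
		unsigned int actl = poll_status();

		if (actl == 0)
			break;	/* no function or data transfer active anymore */
		/* udelay_simple(100) would go here in the kernel version */
	}
}

int main(void)
{
	wait_idle_sketch();
	printf("device is idle\n");
	return 0;
}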
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7d4ecb65db0..8d1d2987317 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
-extern wait_queue_head_t ccw_device_init_wq;
-extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c77b6e06bf6..4845d64f284 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -704,9 +704,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- struct channel_path_desc_fmt1 desc;
+ struct channel_path *chp;
struct chp_id chpid;
- int mdc = 0, ret, i;
+ int mdc = 0, i;
/* Adjust requested path mask to excluded varied off paths. */
if (mask)
@@ -719,14 +719,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
if (!(mask & (0x80 >> i)))
continue;
chpid.id = sch->schib.pmcw.chpid[i];
- ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
- if (ret)
- return ret;
- if (!desc.f)
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
return 0;
- if (!desc.r)
+ }
+ if (!chp->desc_fmt1.r)
mdc = 1;
- mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
}
return mdc;
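With the cached format-1 data, ccw_device_get_mdc() aggregates the maximum-data-count over all usable paths: a path without the 'f' bit makes the whole result 0, a path without the 'r' bit forces the result to 1, otherwise the smallest mdc across the paths wins. A user-space sketch of just that aggregation follows; the per-path array is made up and stands in for chp->desc_fmt1.

#include <stdio.h>

struct path_fmt1 { unsigned int f:1, r:1, mdc:16; };

/* Invented per-path data; in the kernel this comes from the channel-path cache. */
static const struct path_fmt1 paths[] = {
	{ 1, 1, 64 },
	{ 1, 1, 32 },
	{ 1, 0, 16 },	/* no 'r' bit: caps the aggregate at 1 */
};

static int get_mdc_sketch(const struct path_fmt1 *p, unsigned int n)
{
	int mdc = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!p[i].f)
			return 0;	/* one path without 'f' disables mdc entirely */
		if (!p[i].r)
			mdc = 1;
		mdc = mdc ? (mdc < (int)p[i].mdc ? mdc : (int)p[i].mdc)
			  : (int)p[i].mdc;
	}
	return mdc;
}

int main(void)
{
	printf("mdc = %d\n",
	       get_mdc_sketch(paths, sizeof(paths) / sizeof(paths[0])));
	return 0;
}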
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 65d13e38803..5a999084a22 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -17,7 +17,7 @@ struct idset {
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
- return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
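The idset change simply switches to the generic round-up helper for sizing the bitmap: one bit per (ssid, id) pair, rounded up to whole longs. A portable sketch of that arithmetic; BITS_TO_LONGS here is a local re-implementation mirroring the kernel macro, not the kernel header.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	 (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap_size(int num_ssid, int num_id)
{
	/* One bit per (ssid, id) pair, rounded up to whole longs. */
	return BITS_TO_LONGS((unsigned long)num_ssid * num_id) *
	       sizeof(unsigned long);
}

int main(void)
{
	/* Four subchannel sets with 65536 subchannel ids each. */
	printf("%lu bytes\n", bitmap_size(4, 65536));
	return 0;
}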