Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/Kconfig          |   2
-rw-r--r--  drivers/s390/block/dasd.c           |  35
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  |  23
-rw-r--r--  drivers/s390/block/dasd_devmap.c    |  87
-rw-r--r--  drivers/s390/block/dasd_diag.c      |  38
-rw-r--r--  drivers/s390/block/dasd_eckd.c      |   4
-rw-r--r--  drivers/s390/block/dasd_fba.c       |   4
-rw-r--r--  drivers/s390/block/dasd_int.h       |   6
-rw-r--r--  drivers/s390/block/dasd_ioctl.c     |   2
-rw-r--r--  drivers/s390/block/xpram.c          |  54
-rw-r--r--  drivers/s390/char/con3215.c         |  52
-rw-r--r--  drivers/s390/char/ctrlchar.c        |   9
-rw-r--r--  drivers/s390/char/fs3270.c          |  28
-rw-r--r--  drivers/s390/char/keyboard.c        |   2
-rw-r--r--  drivers/s390/char/monwriter.c       |  24
-rw-r--r--  drivers/s390/char/sclp.c            |  35
-rw-r--r--  drivers/s390/char/sclp_quiesce.c    |  37
-rw-r--r--  drivers/s390/char/sclp_tty.c        |   4
-rw-r--r--  drivers/s390/char/sclp_vt220.c      |   2
-rw-r--r--  drivers/s390/char/tape.h            |   3
-rw-r--r--  drivers/s390/char/tape_34xx.c       |  23
-rw-r--r--  drivers/s390/char/tape_3590.c       |   7
-rw-r--r--  drivers/s390/char/tape_block.c      |  14
-rw-r--r--  drivers/s390/char/tape_char.c       |   8
-rw-r--r--  drivers/s390/char/tape_core.c       |  14
-rw-r--r--  drivers/s390/char/tty3270.c         |   5
-rw-r--r--  drivers/s390/char/vmwatchdog.c      |  52
-rw-r--r--  drivers/s390/cio/chsc.c             | 154
-rw-r--r--  drivers/s390/cio/cio.c              | 200
-rw-r--r--  drivers/s390/cio/cio.h              |   6
-rw-r--r--  drivers/s390/cio/css.c              |  87
-rw-r--r--  drivers/s390/cio/css.h              |  19
-rw-r--r--  drivers/s390/cio/device.c           | 498
-rw-r--r--  drivers/s390/cio/device.h           |   8
-rw-r--r--  drivers/s390/cio/device_fsm.c       | 160
-rw-r--r--  drivers/s390/cio/device_id.c        |  62
-rw-r--r--  drivers/s390/cio/device_ops.c       |  48
-rw-r--r--  drivers/s390/cio/device_pgid.c      |  61
-rw-r--r--  drivers/s390/cio/device_status.c    |  10
-rw-r--r--  drivers/s390/cio/ioasm.h            | 220
-rw-r--r--  drivers/s390/cio/qdio.c             | 254
-rw-r--r--  drivers/s390/cio/qdio.h             | 224
-rw-r--r--  drivers/s390/crypto/ap_bus.c        |  65
-rw-r--r--  drivers/s390/net/claw.h             |   2
-rw-r--r--  drivers/s390/net/iucv.c             |  43
-rw-r--r--  drivers/s390/net/lcs.c              |  88
-rw-r--r--  drivers/s390/net/lcs.h              |  29
-rw-r--r--  drivers/s390/net/qeth.h             |   2
-rw-r--r--  drivers/s390/net/qeth_eddp.c        |  40
-rw-r--r--  drivers/s390/net/qeth_eddp.h        |   2
-rw-r--r--  drivers/s390/net/qeth_main.c        |  14
-rw-r--r--  drivers/s390/s390mach.c             |  95
-rw-r--r--  drivers/s390/scsi/zfcp_def.h        |  10
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c        |   2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c        |   2
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c       |   3
56 files changed, 1626 insertions(+), 1356 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 929d6fff615..b250c535450 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,4 +1,4 @@
-if S390
+if S390 && BLOCK
comment "S/390 block device drivers"
depends on S390
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d0647d116ea..492b68bcd7c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -54,7 +54,7 @@ static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static int dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
-static void do_kick_device(void *data);
+static void do_kick_device(struct work_struct *);
/*
* SECTION: Operations on the device structure.
@@ -100,7 +100,7 @@ dasd_alloc_device(void)
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
init_timer(&device->timer);
- INIT_WORK(&device->kick_work, do_kick_device, device);
+ INIT_WORK(&device->kick_work, do_kick_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
@@ -203,6 +203,7 @@ dasd_state_basic_to_known(struct dasd_device * device)
rc = dasd_flush_ccw_queue(device, 1);
if (rc)
return rc;
+ dasd_clear_timer(device);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
@@ -406,11 +407,9 @@ dasd_change_state(struct dasd_device *device)
* event daemon.
*/
static void
-do_kick_device(void *data)
+do_kick_device(struct work_struct *work)
{
- struct dasd_device *device;
-
- device = (struct dasd_device *) data;
+ struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
dasd_change_state(device);
dasd_schedule_bh(device);
dasd_put_device(device);
@@ -1051,10 +1050,10 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
} else { /* error */
memcpy(&cqr->irb, irb, sizeof (struct irb));
-#ifdef ERP_DEBUG
- /* dump sense data */
- dasd_log_sense(cqr, irb);
-#endif
+ if (device->features & DASD_FEATURE_ERPLOG) {
+ /* dump sense data */
+ dasd_log_sense(cqr, irb);
+ }
switch (era) {
case dasd_era_fatal:
cqr->status = DASD_CQR_FAILED;
@@ -1263,15 +1262,21 @@ __dasd_check_expire(struct dasd_device * device)
if (list_empty(&device->ccw_queue))
return;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
- if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
- if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+ if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+ (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ if (device->discipline->term_IO(cqr) != 0) {
+ /* Hmpf, try again in 5 sec */
+ dasd_set_timer(device, 5*HZ);
+ DEV_MESSAGE(KERN_ERR, device,
+ "internal error - timeout (%is) expired "
+ "for cqr %p, termination failed, "
+ "retrying in 5s",
+ (cqr->expires/HZ), cqr);
+ } else {
DEV_MESSAGE(KERN_ERR, device,
"internal error - timeout (%is) expired "
"for cqr %p (%i retries left)",
(cqr->expires/HZ), cqr, cqr->retries);
- if (device->discipline->term_IO(cqr) != 0)
- /* Hmpf, try again in 1/10 sec */
- dasd_set_timer(device, 10);
}
}
}
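
Note: the dasd.c hunks above follow the reworked workqueue API, where a handler receives a struct work_struct * and recovers its owning structure with container_of() instead of being handed an opaque void * at INIT_WORK time. A minimal stand-alone sketch of that idiom follows; it uses simplified stand-in types (work_struct, dasd_device_example) and is illustration only, not the driver code itself.

/*
 * Illustration only: mirrors the do_kick_device() conversion above.
 * The struct definitions are stand-ins so this compiles as a user-space program.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in for the kernel type */

struct dasd_device_example {		/* stand-in for struct dasd_device */
	int state;
	struct work_struct kick_work;	/* embedded work item */
};

/* New-style handler: takes the work item, not a void * data argument. */
static void do_kick_device_example(struct work_struct *work)
{
	struct dasd_device_example *device =
		container_of(work, struct dasd_device_example, kick_work);
	printf("kick device, state %d\n", device->state);
}

int main(void)
{
	struct dasd_device_example dev = { .state = 1 };
	do_kick_device_example(&dev.kick_work);	/* what the workqueue core would do */
	return 0;
}
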
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 669805d4402..4d01040c2c6 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2641,14 +2641,12 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
struct dasd_ccw_req *erp = NULL;
struct dasd_device *device = cqr->device;
__u32 cpa = cqr->irb.scsw.cpa;
+ struct dasd_ccw_req *temp_erp = NULL;
-#ifdef ERP_DEBUG
- /* print current erp_chain */
- DEV_MESSAGE(KERN_ERR, device, "%s",
- "ERP chain at BEGINNING of ERP-ACTION");
- {
- struct dasd_ccw_req *temp_erp = NULL;
-
+ if (device->features & DASD_FEATURE_ERPLOG) {
+ /* print current erp_chain */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "ERP chain at BEGINNING of ERP-ACTION");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
@@ -2658,7 +2656,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
temp_erp->refers);
}
}
-#endif /* ERP_DEBUG */
/* double-check if current erp/cqr was successfull */
if ((cqr->irb.scsw.cstat == 0x00) &&
@@ -2695,11 +2692,10 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
-#ifdef ERP_DEBUG
- /* print current erp_chain */
- DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
- {
- struct dasd_ccw_req *temp_erp = NULL;
+ if (device->features & DASD_FEATURE_ERPLOG) {
+ /* print current erp_chain */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "ERP chain at END of ERP-ACTION");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
@@ -2709,7 +2705,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
temp_erp->refers);
}
}
-#endif /* ERP_DEBUG */
if (erp->status == DASD_CQR_FAILED)
dasd_log_ccw(erp, 1, cpa);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 91cf971f065..5943266152f 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -25,7 +25,7 @@
#include "dasd_int.h"
-kmem_cache_t *dasd_page_cache;
+struct kmem_cache *dasd_page_cache;
EXPORT_SYMBOL_GPL(dasd_page_cache);
/*
@@ -202,6 +202,8 @@ dasd_feature_list(char *str, char **endp)
features |= DASD_FEATURE_READONLY;
else if (len == 4 && !strncmp(str, "diag", 4))
features |= DASD_FEATURE_USEDIAG;
+ else if (len == 6 && !strncmp(str, "erplog", 6))
+ features |= DASD_FEATURE_ERPLOG;
else {
MESSAGE(KERN_WARNING,
"unsupported feature: %*s, "
@@ -684,26 +686,77 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
- int ro_flag;
+ int val;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- ro_flag = buf[0] == '1';
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
spin_lock(&dasd_devmap_lock);
- if (ro_flag)
+ if (val)
devmap->features |= DASD_FEATURE_READONLY;
else
devmap->features &= ~DASD_FEATURE_READONLY;
if (devmap->device)
devmap->device->features = devmap->features;
if (devmap->device && devmap->device->gdp)
- set_disk_ro(devmap->device->gdp, ro_flag);
+ set_disk_ro(devmap->device->gdp, val);
spin_unlock(&dasd_devmap_lock);
return count;
}
static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int erplog;
+
+ devmap = dasd_find_busid(dev->bus_id);
+ if (!IS_ERR(devmap))
+ erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+ else
+ erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+ return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ int val;
+ char *endp;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
+ spin_lock(&dasd_devmap_lock);
+ if (val)
+ devmap->features |= DASD_FEATURE_ERPLOG;
+ else
+ devmap->features &= ~DASD_FEATURE_ERPLOG;
+ if (devmap->device)
+ devmap->device->features = devmap->features;
+ spin_unlock(&dasd_devmap_lock);
+ return count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
/*
* use_diag controls whether the driver should use diag rather than ssch
@@ -729,17 +782,22 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
{
struct dasd_devmap *devmap;
ssize_t rc;
- int use_diag;
+ int val;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- use_diag = buf[0] == '1';
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device) {
- if (use_diag)
+ if (val)
devmap->features |= DASD_FEATURE_USEDIAG;
else
devmap->features &= ~DASD_FEATURE_USEDIAG;
@@ -854,14 +912,20 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
- int rc;
+ int val, rc;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if (!devmap->device)
- return count;
- if (buf[0] == '1') {
+ return -ENODEV;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
+ if (val) {
rc = dasd_eer_enable(devmap->device);
if (rc)
return rc;
@@ -880,6 +944,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_uid.attr,
&dev_attr_use_diag.attr,
&dev_attr_eer_enabled.attr,
+ &dev_attr_erplog.attr,
NULL,
};
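
Note: the new dasd_ro_store(), dasd_erplog_store(), dasd_use_diag_store() and dasd_eer_store() bodies above share one validation rule: parse the buffer with simple_strtoul(), accept only 0 or 1, and tolerate at most one trailing character (typically the newline from echo). A user-space sketch of the same check, using strtoul() in place of the kernel helper (illustration only; parse_flag is a hypothetical name):

#include <errno.h>
#include <stdlib.h>

/* Returns 0 or 1 on success, -EINVAL for anything else ("2", "yes", "1x", ...). */
static int parse_flag(const char *buf, size_t count)
{
	char *endp;
	unsigned long val = strtoul(buf, &endp, 0);

	/* At most one character may follow the parsed number. */
	if (((endp + 1) < (buf + count)) || (val > 1))
		return -EINVAL;
	return (int)val;
}

int main(void)
{
	return parse_flag("1\n", 2) == 1 ? 0 : 1;	/* exercises the accept path */
}
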
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 23fa0b28917..53db58a6861 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -63,44 +63,26 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
* and function code cmd.
* In case of an exception return 3. Otherwise return result of bitwise OR of
* resulting condition code and DIAG return code. */
-static __inline__ int
-dia250(void *iob, int cmd)
+static inline int dia250(void *iob, int cmd)
{
+ register unsigned long reg0 asm ("0") = (unsigned long) iob;
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
int rc;
- __asm__ __volatile__(
-#ifdef CONFIG_64BIT
- " lghi %0,3\n"
- " lgr 0,%3\n"
- " diag 0,%2,0x250\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- " or %0,1\n"
- "1:\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous\n"
-#else
- " lhi %0,3\n"
- " lr 0,%3\n"
+ rc = 3;
+ asm volatile(
" diag 0,%2,0x250\n"
"0: ipm %0\n"
" srl %0,28\n"
" or %0,1\n"
"1:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous\n"
-#endif
- : "=&d" (rc), "=m" (*(addr_type *) iob)
- : "d" (cmd), "d" (iob), "m" (*(addr_type *) iob)
- : "0", "1", "cc");
+ EX_TABLE(0b,1b)
+ : "+d" (rc), "=m" (*(addr_type *) iob)
+ : "d" (cmd), "d" (reg0), "m" (*(addr_type *) iob)
+ : "1", "cc");
return rc;
}
@@ -236,7 +218,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
/* Handle external interruption. */
static void
-dasd_ext_handler(struct pt_regs *regs, __u16 code)
+dasd_ext_handler(__u16 code)
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
@@ -547,7 +529,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
- if (req->flags & REQ_FAILFAST)
+ if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = DIAG_TIMEOUT;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index b7a7fac3f7c..fdaa471e845 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1215,7 +1215,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
- SLAB_DMA | __GFP_NOWARN);
+ GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
if (copy)
@@ -1266,7 +1266,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
recid++;
}
}
- if (req->flags & REQ_FAILFAST)
+ if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index e85015be109..b857fd5893f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -308,7 +308,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
- SLAB_DMA | __GFP_NOWARN);
+ GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
if (copy)
@@ -344,7 +344,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
recid++;
}
}
- if (req->flags & REQ_FAILFAST)
+ if (req->cmd_flags & REQ_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9f52004f6fc..fb725e3b08f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -13,10 +13,6 @@
#ifdef __KERNEL__
-/* erp debugging in dasd.c and dasd_3990_erp.c */
-#define ERP_DEBUG
-
-
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -474,7 +470,7 @@ extern struct dasd_profile_info_t dasd_global_profile;
extern unsigned int dasd_profile_level;
extern struct block_device_operations dasd_device_operations;
-extern kmem_cache_t *dasd_page_cache;
+extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(char *, int, int, struct dasd_device *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 8fed3603e9e..758cfb54286 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -430,7 +430,7 @@ dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int rval;
lock_kernel();
- rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ rval = dasd_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
unlock_kernel();
return (rval == -EINVAL) ? -ENOIOCTLCMD : rval;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index cab2c736683..a04d9120cef 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -89,28 +89,15 @@ MODULE_LICENSE("GPL");
*/
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
- int cc;
+ int cc = 2; /* return unused cc 2 if pgin traps */
- __asm__ __volatile__ (
- " lhi %0,2\n" /* return unused cc 2 if pgin traps */
- " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
+ asm volatile(
+ " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
+ "0: ipm %0\n"
+ " srl %0,28\n"
"1:\n"
-#ifndef CONFIG_64BIT
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous"
-#else
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous"
-#endif
- : "=&d" (cc)
- : "a" (__pa(page_addr)), "a" (xpage_index)
- : "cc" );
+ EX_TABLE(0b,1b)
+ : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2) {
@@ -137,28 +124,15 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
*/
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
- int cc;
+ int cc = 2; /* return unused cc 2 if pgin traps */
- __asm__ __volatile__ (
- " lhi %0,2\n" /* return unused cc 2 if pgout traps */
- " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
+ asm volatile(
+ " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
+ "0: ipm %0\n"
+ " srl %0,28\n"
"1:\n"
-#ifndef CONFIG_64BIT
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous"
-#else
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous"
-#endif
- : "=&d" (cc)
- : "a" (__pa(page_addr)), "a" (xpage_index)
- : "cc" );
+ EX_TABLE(0b,1b)
+ : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2) {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 2fa566fa6da..c9321b920e9 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -299,14 +299,14 @@ raw3215_timeout(unsigned long __data)
struct raw3215_info *raw = (struct raw3215_info *) __data;
unsigned long flags;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (raw->flags & RAW3215_TIMER_RUNS) {
del_timer(&raw->timer);
raw->flags &= ~RAW3215_TIMER_RUNS;
raw3215_mk_write_req(raw);
raw3215_start_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -355,10 +355,10 @@ raw3215_tasklet(void *data)
unsigned long flags;
raw = (struct raw3215_info *) data;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
/* Check for pending message from raw3215_irq */
if (raw->message != NULL) {
printk(raw->message, raw->msg_dstat, raw->msg_cstat);
@@ -512,9 +512,9 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length)
if (RAW3215_BUFFER_SIZE - raw->count >= length)
break;
/* there might be another cpu waiting for the lock */
- spin_unlock(raw->lock);
+ spin_unlock(get_ccwdev_lock(raw->cdev));
udelay(100);
- spin_lock(raw->lock);
+ spin_lock(get_ccwdev_lock(raw->cdev));
}
}
@@ -528,7 +528,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
int c, count;
while (length > 0) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
count = (length > RAW3215_BUFFER_SIZE) ?
RAW3215_BUFFER_SIZE : length;
length -= count;
@@ -555,7 +555,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
/* start or queue request */
raw3215_try_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
@@ -568,7 +568,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
unsigned long flags;
unsigned int length, i;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (ch == '\t') {
length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
raw->line_pos += length;
@@ -592,7 +592,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
/* start or queue request */
raw3215_try_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -604,13 +604,13 @@ raw3215_flush_buffer(struct raw3215_info *raw)
{
unsigned long flags;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (raw->count > 0) {
raw->flags |= RAW3215_FLUSHING;
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -625,9 +625,9 @@ raw3215_startup(struct raw3215_info *raw)
return 0;
raw->line_pos = 0;
raw->flags |= RAW3215_ACTIVE;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
return 0;
}
@@ -644,21 +644,21 @@ raw3215_shutdown(struct raw3215_info *raw)
if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if ((raw->flags & RAW3215_WORKING) ||
raw->queued_write != NULL ||
raw->queued_read != NULL) {
raw->flags |= RAW3215_CLOSING;
add_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
schedule();
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
static int
@@ -686,7 +686,6 @@ raw3215_probe (struct ccw_device *cdev)
}
raw->cdev = cdev;
- raw->lock = get_ccwdev_lock(cdev);
raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
memset(raw, 0, sizeof(struct raw3215_info));
raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
@@ -809,9 +808,9 @@ con3215_unblank(void)
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
static int __init
@@ -873,7 +872,6 @@ con3215_init(void)
raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
raw->cdev = cdev;
- raw->lock = get_ccwdev_lock(cdev);
cdev->dev.driver_data = raw;
cdev->handler = raw3215_irq;
@@ -1066,10 +1064,10 @@ tty3215_unthrottle(struct tty_struct * tty)
raw = (struct raw3215_info *) tty->driver_data;
if (raw->flags & RAW3215_THROTTLED) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_THROTTLED;
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
@@ -1096,14 +1094,14 @@ tty3215_start(struct tty_struct *tty)
raw = (struct raw3215_info *) tty->driver_data;
if (raw->flags & RAW3215_STOPPED) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_STOPPED;
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
-static struct tty_operations tty3215_ops = {
+static const struct tty_operations tty3215_ops = {
.open = tty3215_open,
.close = tty3215_close,
.write = tty3215_write,
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index d83eb6358ba..c6cbcb3f925 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,14 +16,15 @@
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
+static struct tty_struct *sysrq_tty;
static void
-ctrlchar_handle_sysrq(void *tty)
+ctrlchar_handle_sysrq(struct work_struct *work)
{
- handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty);
+ handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
}
-static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL);
+static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
#endif
@@ -53,7 +54,7 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
- ctrlchar_work.data = tty;
+ sysrq_tty = tty;
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index ef004d08971..0893d306ae8 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -17,7 +17,6 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
@@ -28,7 +27,7 @@ struct raw3270_fn fs3270_fn;
struct fs3270 {
struct raw3270_view view;
- pid_t fs_pid; /* Pid of controlling program. */
+ struct pid *fs_pid; /* Pid of controlling program. */
int read_command; /* ccw command to use for reads. */
int write_command; /* ccw command to use for writes. */
int attention; /* Got attention. */
@@ -103,7 +102,7 @@ fs3270_restore_callback(struct raw3270_request *rq, void *data)
fp = (struct fs3270 *) rq->view;
if (rq->rc != 0 || rq->rescnt != 0) {
if (fp->fs_pid)
- kill_proc(fp->fs_pid, SIGHUP, 1);
+ kill_pid(fp->fs_pid, SIGHUP, 1);
}
fp->rdbuf_size = 0;
raw3270_request_reset(rq);
@@ -174,7 +173,7 @@ fs3270_save_callback(struct raw3270_request *rq, void *data)
*/
if (rq->rc != 0 || rq->rescnt == 0) {
if (fp->fs_pid)
- kill_proc(fp->fs_pid, SIGHUP, 1);
+ kill_pid(fp->fs_pid, SIGHUP, 1);
fp->rdbuf_size = 0;
} else
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
@@ -420,16 +419,20 @@ fs3270_open(struct inode *inode, struct file *filp)
struct idal_buffer *ib;
int minor, rc;
- if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR)
+ if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
return -ENODEV;
- minor = iminor(filp->f_dentry->d_inode);
+ minor = iminor(filp->f_path.dentry->d_inode);
/* Check for minor 0 multiplexer. */
if (minor == 0) {
- if (!current->signal->tty)
+ struct tty_struct *tty;
+ mutex_lock(&tty_mutex);
+ tty = get_current_tty();
+ if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
+ mutex_unlock(&tty_mutex);
return -ENODEV;
- if (current->signal->tty->driver->major != IBM_TTY3270_MAJOR)
- return -ENODEV;
- minor = current->signal->tty->index + RAW3270_FIRSTMINOR;
+ }
+ minor = tty->index + RAW3270_FIRSTMINOR;
+ mutex_unlock(&tty_mutex);
}
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
@@ -443,7 +446,7 @@ fs3270_open(struct inode *inode, struct file *filp)
return PTR_ERR(fp);
init_waitqueue_head(&fp->wait);
- fp->fs_pid = current->pid;
+ fp->fs_pid = get_pid(task_pid(current));
rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
if (rc) {
fs3270_free_view(&fp->view);
@@ -481,7 +484,8 @@ fs3270_close(struct inode *inode, struct file *filp)
fp = filp->private_data;
filp->private_data = NULL;
if (fp) {
- fp->fs_pid = 0;
+ put_pid(fp->fs_pid);
+ fp->fs_pid = NULL;
raw3270_reset(&fp->view);
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3be06569180..e3491a5f521 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -304,7 +304,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
- handle_sysrq(value, NULL, kbd->tty);
+ handle_sysrq(value, kbd->tty);
return;
}
if (value == '-') {
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 1e3939aeb8a..b9b0fc3f812 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -26,6 +26,7 @@
#define MONWRITE_MAX_DATALEN 4024
static int mon_max_bufs = 255;
+static int mon_buf_count;
struct mon_buf {
struct list_head list;
@@ -40,7 +41,6 @@ struct mon_private {
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
- int mon_buf_count;
};
/*
@@ -73,12 +73,15 @@ static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
- if (entry->hdr.applid == monhdr->applid &&
+ if ((entry->hdr.mon_function == monhdr->mon_function ||
+ monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
+ entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
+
return NULL;
}
@@ -92,25 +95,27 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
monhdr->mon_function > MONWRITE_START_CONFIG ||
monhdr->hdrlen != sizeof(struct monwrite_hdr))
return -EINVAL;
- monbuf = monwrite_find_hdr(monpriv, monhdr);
+ monbuf = NULL;
+ if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+ monbuf = monwrite_find_hdr(monpriv, monhdr);
if (monbuf) {
if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
monhdr->datalen = monbuf->hdr.datalen;
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_STOP_REC);
list_del(&monbuf->list);
- monpriv->mon_buf_count--;
+ mon_buf_count--;
kfree(monbuf->data);
kfree(monbuf);
monbuf = NULL;
}
- } else {
- if (monpriv->mon_buf_count >= mon_max_bufs)
+ } else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
+ if (mon_buf_count >= mon_max_bufs)
return -ENOSPC;
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
- monbuf->data = kzalloc(monbuf->hdr.datalen,
+ monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
@@ -118,7 +123,8 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
}
monbuf->hdr = *monhdr;
list_add_tail(&monbuf->list, &monpriv->list);
- monpriv->mon_buf_count++;
+ if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+ mon_buf_count++;
}
monpriv->current_buf = monbuf;
return 0;
@@ -186,7 +192,7 @@ static int monwrite_close(struct inode *inode, struct file *filp)
if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&entry->hdr, entry->data,
APPLDATA_STOP_REC);
- monpriv->mon_buf_count--;
+ mon_buf_count--;
list_del(&entry->list);
kfree(entry->data);
kfree(entry);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 985d1613baa..8a056df09d6 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -100,13 +100,12 @@ service_call(sclp_cmdw_t command, void *sccb)
{
int cc;
- __asm__ __volatile__(
- " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc)
- : "d" (command), "a" (__pa(sccb))
- : "cc", "memory" );
+ asm volatile(
+ " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
+ " ipm %0\n"
+ " srl %0,28"
+ : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+ : "cc", "memory");
if (cc == 3)
return -EIO;
if (cc == 2)
@@ -325,7 +324,7 @@ __sclp_find_req(u32 sccb)
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
static void
-sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
+sclp_interrupt_handler(__u16 code)
{
struct sclp_req *req;
u32 finished_sccb;
@@ -360,16 +359,6 @@ sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
sclp_process_queue();
}
-/* Return current Time-Of-Day clock. */
-static inline u64
-sclp_get_clock(void)
-{
- u64 result;
-
- asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
- return result;
-}
-
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
@@ -382,7 +371,6 @@ sclp_tod_from_jiffies(unsigned long jiffies)
void
sclp_sync_wait(void)
{
- unsigned long psw_mask;
unsigned long flags;
unsigned long cr0, cr0_sync;
u64 timeout;
@@ -392,7 +380,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = sclp_get_clock() +
+ timeout = get_clock() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -406,13 +394,12 @@ sclp_sync_wait(void)
cr0_sync |= 0x00000200;
cr0_sync &= 0xFFFFF3AC;
__ctl_load(cr0_sync, 0, 0);
- asm volatile ("STOSM 0(%1),0x01"
- : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
+ __raw_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- sclp_get_clock() > timeout &&
+ get_clock() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
barrier();
@@ -756,7 +743,7 @@ EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */
static void
-sclp_check_handler(struct pt_regs *regs, __u16 code)
+sclp_check_handler(__u16 code)
{
u32 finished_sccb;
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 32004aae95c..ffa9282ce97 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -19,52 +19,17 @@
#include "sclp.h"
-
-#ifdef CONFIG_SMP
-/* Signal completion of shutdown process. All CPUs except the first to enter
- * this function: go to stopped state. First CPU: wait until all other
- * CPUs are in stopped or check stop state. Afterwards, load special PSW
- * to indicate completion. */
-static void
-do_load_quiesce_psw(void * __unused)
-{
- static atomic_t cpuid = ATOMIC_INIT(-1);
- psw_t quiesce_psw;
- int cpu;
-
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
- signal_processor(smp_processor_id(), sigp_stop);
- /* Wait for all other cpus to enter stopped state */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- while(!smp_cpu_not_running(cpu))
- cpu_relax();
- }
- /* Quiesce the last cpu with the special psw */
- quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
- quiesce_psw.addr = 0xfff;
- __load_psw(quiesce_psw);
-}
-
-/* Shutdown handler. Perform shutdown function on all CPUs. */
-static void
-do_machine_quiesce(void)
-{
- on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
-}
-#else
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void
do_machine_quiesce(void)
{
psw_t quiesce_psw;
+ smp_send_stop();
quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
-#endif
/* Handler for quiesce event. Start shutdown procedure. */
static void
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index f6cf9023039..2d173e5c8a0 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -60,8 +60,6 @@ static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
-extern struct termios tty_std_termios;
-
static struct sclp_ioctls sclp_ioctls;
static struct sclp_ioctls sclp_ioctls_init =
{
@@ -711,7 +709,7 @@ static struct sclp_register sclp_input_event =
.receiver_fn = sclp_tty_receiver
};
-static struct tty_operations sclp_ops = {
+static const struct tty_operations sclp_ops = {
.open = sclp_tty_open,
.close = sclp_tty_close,
.write = sclp_tty_write,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 54fba6f1718..723bf4191bf 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -655,7 +655,7 @@ __sclp_vt220_init(int early)
return 0;
}
-static struct tty_operations sclp_vt220_ops = {
+static const struct tty_operations sclp_vt220_ops = {
.open = sclp_vt220_open,
.close = sclp_vt220_close,
.write = sclp_vt220_write,
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 1f4c89967be..c9f1c4c8bb1 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -179,6 +179,7 @@ struct tape_char_data {
/* Block Frontend Data */
struct tape_blk_data
{
+ struct tape_device * device;
/* Block device request queue. */
request_queue_t * request_queue;
spinlock_t request_queue_lock;
@@ -240,7 +241,7 @@ struct tape_device {
#endif
/* Function to start or stop the next request later. */
- struct work_struct tape_dnr;
+ struct delayed_work tape_dnr;
};
/* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 7b95dab913d..e765875e8db 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
return rc;
}
+struct tape_34xx_work {
+ struct tape_device *device;
+ enum tape_op op;
+ struct work_struct work;
+};
+
/*
* These functions are currently used only to schedule a medium_sense for
* later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
* interrupt handler.
*/
static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
{
- struct {
- struct tape_device *device;
- enum tape_op op;
- struct work_struct work;
- } *p = data;
+ struct tape_34xx_work *p =
+ container_of(work, struct tape_34xx_work, work);
switch(p->op) {
case TO_MSEN:
@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
static int
tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
- struct {
- struct tape_device *device;
- enum tape_op op;
- struct work_struct work;
- } *p;
+ struct tape_34xx_work *p;
if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
memset(p, 0, sizeof(*p));
- INIT_WORK(&p->work, tape_34xx_work_handler, p);
+ INIT_WORK(&p->work, tape_34xx_work_handler);
p->device = tape_get_device_reference(device);
p->op = op;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 928cbefc49d..9df912f6318 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -236,9 +236,10 @@ struct work_handler_data {
};
static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
{
- struct work_handler_data *p = data;
+ struct work_handler_data *p =
+ container_of(work, struct work_handler_data, work);
switch (p->op) {
case TO_MSEN:
@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
- INIT_WORK(&p->work, tape_3590_work_handler, p);
+ INIT_WORK(&p->work, tape_3590_work_handler);
p->device = tape_get_device_reference(device);
p->op = op;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 3225fcd1dcb..c8a89b3b87d 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
+#include <linux/kernel.h>
#include <asm/debug.h>
@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* queue.
*/
static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+ struct tape_blk_data * blkdat;
struct tape_device * device;
request_queue_t * queue;
int nr_queued;
@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
struct list_head * l;
int rc;
- device = (struct tape_device *) data;
+ blkdat = container_of(work, struct tape_blk_data, requeue_task);
+ device = blkdat->device;
if (!device)
return;
@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
int rc;
blkdat = &device->blk_data;
+ blkdat->device = device;
spin_lock_init(&blkdat->request_queue_lock);
atomic_set(&blkdat->requeue_scheduled, 0);
@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)
add_disk(disk);
- INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
- tape_get_device_reference(device));
+ tape_get_device_reference(device);
+ INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
return 0;
@@ -271,7 +275,7 @@ void
tapeblock_cleanup_device(struct tape_device *device)
{
flush_scheduled_work();
- device->blk_data.requeue_task.data = tape_put_device(device);
+ tape_put_device(device);
if (!device->blk_data.disk) {
PRINT_ERR("(%s): No gendisk to clean up!\n",
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 97f75237bed..31198c8f271 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -298,13 +298,13 @@ tapechar_open (struct inode *inode, struct file *filp)
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
- imajor(filp->f_dentry->d_inode),
- iminor(filp->f_dentry->d_inode));
+ imajor(filp->f_path.dentry->d_inode),
+ iminor(filp->f_path.dentry->d_inode));
- if (imajor(filp->f_dentry->d_inode) != tapechar_major)
+ if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
return -ENODEV;
- minor = iminor(filp->f_dentry->d_inode);
+ minor = iminor(filp->f_path.dentry->d_inode);
device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 2826aed9104..c6c2e918b99 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -28,7 +28,7 @@
#define PRINTK_HEADER "TAPE_CORE: "
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);
/*
* One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
return 0;
case -EBUSY:
request->status = TAPE_REQUEST_CANCEL;
- schedule_work(&device->tape_dnr);
+ schedule_delayed_work(&device->tape_dnr, 0);
return 0;
case -ENODEV:
DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@ tape_alloc_device(void)
*device->modeset_byte = 0;
device->first_minor = -1;
atomic_set(&device->ref_count, 1);
- INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+ INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
return device;
}
@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
} else if (rc == -EBUSY) {
/* The common I/O subsystem is currently busy. Retry later. */
request->status = TAPE_REQUEST_QUEUED;
- schedule_work(&device->tape_dnr);
+ schedule_delayed_work(&device->tape_dnr, 0);
rc = 0;
} else {
/* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device)
}
static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
{
- struct tape_device * device;
+ struct tape_device *device =
+ container_of(work, struct tape_device, tape_dnr.work);
- device = (struct tape_device *) data;
DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
spin_lock_irq(get_ccwdev_lock(device->cdev));
__tape_start_next_request(device);
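
Note: tape_dnr becomes a struct delayed_work (see the tape.h hunk earlier), so callers switch to schedule_delayed_work(..., 0) and the handler digs the device out through the embedded work member. A stand-alone sketch of that nested container_of step, with simplified stand-in types (illustration only, not the driver code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };				/* stand-in */
struct delayed_work { struct work_struct work; int timer; };	/* stand-in */

struct tape_device_example {
	int first_minor;
	struct delayed_work tape_dnr;	/* embedded delayed work item */
};

/* The handler gets the inner work_struct; note the nested member path. */
static void tape_delayed_next_request_example(struct work_struct *work)
{
	struct tape_device_example *device =
		container_of(work, struct tape_device_example, tape_dnr.work);
	printf("start next request on minor %d\n", device->first_minor);
}

int main(void)
{
	struct tape_device_example dev = { .first_minor = 0 };
	tape_delayed_next_request_example(&dev.tape_dnr.work);
	return 0;
}
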
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 29718042c6c..09844621edc 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -698,7 +698,6 @@ tty3270_alloc_view(void)
if (!tp->freemem_pages)
goto out_tp;
INIT_LIST_HEAD(&tp->freemem);
- init_timer(&tp->timer);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
tp->freemem_pages[pages] = (void *)
__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
@@ -1660,7 +1659,7 @@ tty3270_flush_buffer(struct tty_struct *tty)
* Check for visible/invisible input switches
*/
static void
-tty3270_set_termios(struct tty_struct *tty, struct termios *old)
+tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct tty3270 *tp;
int new;
@@ -1738,7 +1737,7 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
return kbd_ioctl(tp->kbd, file, cmd, arg);
}
-static struct tty_operations tty3270_ops = {
+static const struct tty_operations tty3270_ops = {
.open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 807320a41fa..4b868f72fe8 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -54,48 +54,20 @@ enum vmwdt_func {
static int __diag288(enum vmwdt_func func, unsigned int timeout,
char *cmd, size_t len)
{
- register unsigned long __func asm("2");
- register unsigned long __timeout asm("3");
- register unsigned long __cmdp asm("4");
- register unsigned long __cmdl asm("5");
+ register unsigned long __func asm("2") = func;
+ register unsigned long __timeout asm("3") = timeout;
+ register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
+ register unsigned long __cmdl asm("5") = len;
int err;
- __func = func;
- __timeout = timeout;
- __cmdp = virt_to_phys(cmd);
- __cmdl = len;
- err = 0;
- asm volatile (
-#ifdef CONFIG_64BIT
- "diag %2,%4,0x288\n"
- "1: \n"
- ".section .fixup,\"ax\"\n"
- "2: lghi %0,%1\n"
- " jg 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,2b\n"
- ".previous\n"
-#else
- "diag %2,%4,0x288\n"
- "1: \n"
- ".section .fixup,\"ax\"\n"
- "2: lhi %0,%1\n"
- " bras 1,3f\n"
- " .long 1b\n"
- "3: l 1,0(1)\n"
- " br 1\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,2b\n"
- ".previous\n"
-#endif
- : "+&d"(err)
- : "i"(-EINVAL), "d"(__func), "d"(__timeout),
- "d"(__cmdp), "d"(__cmdl)
- : "1", "cc");
+ err = -EINVAL;
+ asm volatile(
+ " diag %1,%3,0x288\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (err) : "d"(__func), "d"(__timeout),
+ "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc");
return err;
}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 3bb4e472d73..cbab8d2ce5c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
return -ENOMEM;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
ret = chsc_get_sch_desc_irq(sch, page);
if (ret) {
static int cio_chsc_err_msg;
@@ -197,14 +197,16 @@ css_get_ssd_info(struct subchannel *sch)
cio_chsc_err_msg = 1;
}
}
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
free_page((unsigned long)page);
if (!ret) {
- int j, chpid;
+ int j, chpid, mask;
/* Allocate channel path structures, if needed. */
for (j = 0; j < 8; j++) {
+ mask = 0x80 >> j;
chpid = sch->ssd_info.chpid[j];
- if (chpid && (get_chp_status(chpid) < 0))
+ if ((sch->schib.pmcw.pim & mask) &&
+ (get_chp_status(chpid) < 0))
new_channel_path(chpid);
}
}
@@ -222,14 +224,16 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
sch = to_subchannel(dev);
chpid = data;
- for (j = 0; j < 8; j++)
- if (sch->schib.pmcw.chpid[j] == chpid->id)
+ for (j = 0; j < 8; j++) {
+ mask = 0x80 >> j;
+ if ((sch->schib.pmcw.pim & mask) &&
+ (sch->schib.pmcw.chpid[j] == chpid->id))
break;
+ }
if (j >= 8)
return 0;
- mask = 0x80 >> j;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
stsch(sch->schid, &schib);
if (!schib.pmcw.dnv)
@@ -247,6 +251,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
cc = cio_clear(sch);
if (cc == -ENODEV)
goto out_unreg;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
/* Call handler. */
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
@@ -259,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
else if (sch->lpm == mask)
goto out_unreg;
out_unlock:
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return 0;
out_unreg:
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
sch->lpm = 0;
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
@@ -366,18 +372,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
struct res_acc_data *res_data;
struct subchannel *sch;
- res_data = (struct res_acc_data *)data;
+ res_data = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if a subchannel is newly available. */
return s390_process_res_acc_new_sch(schid);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
chp_mask = s390_process_res_acc_sch(res_data, sch);
if (chp_mask == 0) {
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -391,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -440,7 +446,7 @@ __get_chpid_from_lir(void *data)
u32 isinfo[28];
} *lir;
- lir = (struct lir*) data;
+ lir = data;
if (!(lir->iq&0x80))
/* NULL link incident record */
return -EINVAL;
@@ -620,38 +626,41 @@ __chp_add_new_sch(struct subchannel_id schid)
static int
__chp_add(struct subchannel_id schid, void *data)
{
- int i;
+ int i, mask;
struct channel_path *chp;
struct subchannel *sch;
- chp = (struct channel_path *)data;
+ chp = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if the subchannel is now available. */
return __chp_add_new_sch(schid);
- spin_lock_irq(&sch->lock);
- for (i=0; i<8; i++)
- if (sch->schib.pmcw.chpid[i] == chp->id) {
+ spin_lock_irq(sch->lock);
+ for (i=0; i<8; i++) {
+ mask = 0x80 >> i;
+ if ((sch->schib.pmcw.pim & mask) &&
+ (sch->schib.pmcw.chpid[i] == chp->id)) {
if (stsch(sch->schid, &sch->schib) != 0) {
/* Endgame. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return -ENXIO;
}
break;
}
+ }
if (i==8) {
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return 0;
}
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
- | 0x80 >> i) & sch->opm;
+ | mask) & sch->opm;
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -700,24 +709,38 @@ chp_process_crw(int chpid, int on)
return chp_add(chpid);
}
-static inline int
-__check_for_io_and_kill(struct subchannel *sch, int index)
+static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
int cc;
- if (!device_is_online(sch))
- /* cio could be doing I/O. */
- return 0;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
- if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
- device_set_waiting(sch);
+ if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
return 1;
- }
return 0;
}
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0) {
+ if(css_enqueue_subchannel_slow(sch->schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ }
+ return;
+ }
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+}
+
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
@@ -727,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
if (!sch->ssd_info.valid)
return;
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
old_lpm = sch->lpm;
for (chp = 0; chp < 8; chp++) {
if (sch->ssd_info.chpid[chp] != chpid)
@@ -740,25 +763,29 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- } else {
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- /*
- * Give running I/O a grace period in which it
- * can successfully terminate, even using the
- * just varied off path. Then kill it.
- */
- if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
+ break;
+ }
+ sch->opm &= ~(0x80 >> chp);
+ sch->lpm &= ~(0x80 >> chp);
+ if (check_for_io_on_path(sch, chp)) {
+ if (device_is_online(sch))
+ /* Path verification is done after killing. */
+ device_kill_io(sch);
+ else
+ /* Kill and retry internal I/O. */
+ terminate_internal_io(sch);
+ } else if (!sch->lpm) {
+ if (device_trigger_verify(sch) != 0) {
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
- } else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- }
+ }
+ } else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
break;
}
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
}
static int
@@ -1463,41 +1490,6 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
return desc;
}
-static int reset_channel_path(struct channel_path *chp)
-{
- int cc;
-
- cc = rchp(chp->id);
- switch (cc) {
- case 0:
- return 0;
- case 2:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-
-static void reset_channel_paths_css(struct channel_subsystem *css)
-{
- int i;
-
- for (i = 0; i <= __MAX_CHPID; i++) {
- if (css->chps[i])
- reset_channel_path(css->chps[i]);
- }
-}
-
-void cio_reset_channel_paths(void)
-{
- int i;
-
- for (i = 0; i <= __MAX_CSSID; i++) {
- if (css[i] && css[i]->valid)
- reset_channel_paths_css(css[i]);
- }
-}
-
static int __init
chsc_alloc_sei_area(void)
{
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2e2882daefb..7835a714a40 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -19,7 +19,9 @@
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
+#include <asm/irq_regs.h>
#include <asm/setup.h>
+#include <asm/reset.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
@@ -27,6 +29,7 @@
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
+#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
@@ -140,11 +143,11 @@ cio_tpi(void)
return 1;
local_bh_disable();
irq_enter ();
- spin_lock(&sch->lock);
+ spin_lock(sch->lock);
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
if (sch->driver && sch->driver->irq)
sch->driver->irq(&sch->dev);
- spin_unlock(&sch->lock);
+ spin_unlock(sch->lock);
irq_exit ();
_local_bh_enable();
return 1;
@@ -412,6 +415,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ if (sch_is_pseudo_sch(sch))
+ return -EINVAL;
ccode = stsch (sch->schid, &sch->schib);
if (ccode)
return -ENODEV;
@@ -459,6 +464,8 @@ cio_disable_subchannel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ if (sch_is_pseudo_sch(sch))
+ return 0;
ccode = stsch (sch->schid, &sch->schib);
if (ccode == 3) /* Not operational. */
return -ENODEV;
@@ -493,6 +500,15 @@ cio_disable_subchannel (struct subchannel *sch)
return ret;
}
+int cio_create_sch_lock(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+ spin_lock_init(sch->lock);
+ return 0;
+}
+
/*
* cio_validate_subchannel()
*
@@ -510,6 +526,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
char dbf_txt[15];
int ccode;
+ int err;
sprintf (dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT (4, dbf_txt);
@@ -517,9 +534,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
- spin_lock_init(&sch->lock);
+ sch->schid = schid;
+ if (cio_is_console(schid)) {
+ sch->lock = cio_get_console_lock();
+ } else {
+ err = cio_create_sch_lock(sch);
+ if (err)
+ goto out;
+ }
mutex_init(&sch->reg_mutex);
-
/* Set a name for the subchannel */
snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
schid.sch_no);
@@ -531,10 +554,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
* is not valid.
*/
ccode = stsch_err (schid, &sch->schib);
- if (ccode)
- return (ccode == 3) ? -ENXIO : ccode;
-
- sch->schid = schid;
+ if (ccode) {
+ err = (ccode == 3) ? -ENXIO : ccode;
+ goto out;
+ }
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
@@ -547,14 +570,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
"non-I/O subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
- return sch->st;
+ err = sch->st;
+ goto out;
}
/* Initialization for io subchannels. */
- if (!sch->schib.pmcw.dnv)
+ if (!sch->schib.pmcw.dnv) {
/* io subchannel but device number is invalid. */
- return -ENODEV;
-
+ err = -ENODEV;
+ goto out;
+ }
/* Devno is valid. */
if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
@@ -564,7 +589,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(0, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
sch->schib.pmcw.dev, sch->schid.ssid);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
sch->opm = 0xff;
if (!cio_is_console(sch->schid))
@@ -592,6 +618,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
return 0;
+out:
+ if (!cio_is_console(schid))
+ kfree(sch->lock);
+ sch->lock = NULL;
+ return err;
}
/*
@@ -606,15 +637,17 @@ do_IRQ (struct pt_regs *regs)
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
+ struct pt_regs *old_regs;
- irq_enter ();
+ old_regs = set_irq_regs(regs);
+ irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
/**
* Make sure that the i/o interrupt did not "overtake"
* the last HZ timer interrupt.
*/
- account_ticks(regs);
+ account_ticks();
/*
* Get interrupt information from lowcore
*/
@@ -632,7 +665,7 @@ do_IRQ (struct pt_regs *regs)
}
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch)
- spin_lock(&sch->lock);
+ spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch (tpi_info->schid, irb) == 0 && sch) {
/* Keep subchannel information word up to date. */
@@ -643,7 +676,7 @@ do_IRQ (struct pt_regs *regs)
sch->driver->irq(&sch->dev);
}
if (sch)
- spin_unlock(&sch->lock);
+ spin_unlock(sch->lock);
/*
* Are more interrupts pending?
* If so, the tpi instruction will update the lowcore
@@ -652,7 +685,8 @@ do_IRQ (struct pt_regs *regs)
* out of the sie which costs more cycles than it saves.
*/
} while (!MACHINE_IS_VM && tpi (NULL) != 0);
- irq_exit ();
+ irq_exit();
+ set_irq_regs(old_regs);
}
#ifdef CONFIG_CCW_CONSOLE
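
The do_IRQ() hunk above switches from handing pt_regs down to account_ticks() to the set_irq_regs() bracketing that came with the genirq rework. A minimal sketch of that bracketing, with the CIO-specific dispatch elided (example_irq_entry is an illustrative name, not a kernel symbol):

	#include <linux/hardirq.h>
	#include <asm/irq_regs.h>

	/* Sketch only: save the caller's register frame, run the handler in
	 * hard-irq context, then restore the previous frame pointer. */
	void example_irq_entry(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		irq_enter();
		/* ... interrupt dispatch would go here ... */
		irq_exit();
		set_irq_regs(old_regs);
	}
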
@@ -681,10 +715,10 @@ wait_cons_dev (void)
__ctl_load (cr6, 6, 6);
do {
- spin_unlock(&console_subchannel.lock);
+ spin_unlock(console_subchannel.lock);
if (!cio_tpi())
cpu_relax();
- spin_lock(&console_subchannel.lock);
+ spin_lock(console_subchannel.lock);
} while (console_subchannel.schib.scsw.actl != 0);
/*
* restore previous isc value
@@ -837,26 +871,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
return -EBUSY;
}
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
- void *data)
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
struct schib schib;
- struct sch_match_id *match_id = data;
if (stsch_err(schid, &schib))
return -ENXIO;
- if (match_id && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- }
if (!schib.pmcw.ena)
return 0;
switch(__disable_subchannel_easy(schid, &schib)) {
@@ -872,27 +892,111 @@ static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
return 0;
}
-static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+ struct crw crw;
+ struct mci *mci;
+
+ /* Check for pending channel report word. */
+ mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+ if (!mci->cp)
+ return;
+ /* Process channel report words. */
+ while (stcrw(&crw) == 0) {
+ /* Check for responses to RCHP. */
+ if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+ atomic_dec(&chpid_reset_count);
+ }
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+ int i, ret;
+ unsigned long long timeout;
+
+ /* Reset subchannels. */
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
+ /* Reset channel paths. */
+ s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
+ /* Enable channel report machine checks. */
+ __ctl_set_bit(14, 28);
+ /* Temporarily reenable machine checks. */
+ local_mcck_enable();
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ ret = rchp(i);
+ if ((ret == 0) || (ret == 2))
+ /*
+ * rchp either succeeded, or another rchp is already
+ * in progress. In either case, we'll get a crw.
+ */
+ atomic_inc(&chpid_reset_count);
+ }
+ /* Wait for machine check for all channel paths. */
+ timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ while (atomic_read(&chpid_reset_count) != 0) {
+ if (get_clock() > timeout)
+ break;
+ cpu_relax();
+ }
+ /* Disable machine checks again. */
+ local_mcck_disable();
+ /* Disable channel report machine checks. */
+ __ctl_clear_bit(14, 28);
+ s390_reset_mcck_handler = NULL;
+}
+
+static struct reset_call css_reset_call = {
+ .fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+ atomic_set(&chpid_reset_count, 0);
+ register_reset_call(&css_reset_call);
+ return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+ struct subchannel_id schid;
+ struct ccw_dev_id devid;
+ int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct sch_match_id *match_id = data;
+
+ if (stsch_err(schid, &schib))
+ return -ENXIO;
+ if (schib.pmcw.dnv &&
+ (schib.pmcw.dev == match_id->devid.devno) &&
+ (schid.ssid == match_id->devid.ssid)) {
+ match_id->schid = schid;
+ match_id->rc = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+ struct subchannel_id *schid)
{
struct sch_match_id match_id;
match_id.devid = *devid;
match_id.rc = -ENODEV;
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+ for_each_subchannel(__reipl_subchannel_match, &match_id);
if (match_id.rc == 0)
*schid = match_id.schid;
return match_id.rc;
}
-
-void clear_all_subchannels(void)
-{
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
-}
-
extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
@@ -900,9 +1004,9 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
{
struct subchannel_id schid;
- if (clear_all_subchannels_and_match(devid, &schid))
+ s390_reset_system();
+ if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
- cio_reset_channel_paths();
do_reipl_asm(*((__u32*)&schid));
}
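
Taken together, the cio.c hunks make the subchannel lock a separately allocated object: cio_create_sch_lock() allocates and initializes it, the console subchannel keeps using a static lock, and every error path has to free the dynamic lock again. A hedged sketch of that pairing, modeled on cio_validate_subchannel() above (example_init_sch and example_validate are illustrative names, not driver symbols):

	/* Sketch only: pair cio_create_sch_lock() with the error-path kfree(). */
	static int example_init_sch(struct subchannel *sch, struct subchannel_id schid)
	{
		int err;

		if (cio_is_console(schid))
			sch->lock = cio_get_console_lock();	/* static console lock */
		else {
			err = cio_create_sch_lock(sch);		/* kmalloc + spin_lock_init */
			if (err)
				return err;
		}
		err = example_validate(sch);			/* hypothetical helper */
		if (err)
			goto out;
		return 0;
	out:
		if (!cio_is_console(schid))
			kfree(sch->lock);			/* never free the console lock */
		sch->lock = NULL;
		return err;
	}
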
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4541c1af4b6..35154a21035 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -87,7 +87,7 @@ struct orb {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
- spinlock_t lock; /* subchannel lock */
+ spinlock_t *lock; /* subchannel lock */
struct mutex reg_mutex;
enum {
SUBCHANNEL_TYPE_IO = 0,
@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
+int cio_create_sch_lock(struct subchannel *);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
+extern spinlock_t * cio_get_console_lock(void);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
+#define cio_get_console_lock() NULL
#endif
extern int cio_show_msg;
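
Since struct subchannel now carries a spinlock_t * rather than an embedded lock, callers pass sch->lock directly instead of taking its address. A trivial sketch of the new convention (example_with_sch_lock is illustrative):

	static void example_with_sch_lock(struct subchannel *sch)
	{
		unsigned long flags;

		/* The member is already a pointer; no '&' is taken anymore. */
		spin_lock_irqsave(sch->lock, flags);
		/* ... inspect or update sch->schib under the lock ... */
		spin_unlock_irqrestore(sch->lock, flags);
	}
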
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7086a74e987..4c81d890791 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
+ kfree(sch->lock);
kfree(sch);
}
-
}
static void
@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
struct subchannel *sch;
sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid))
+ if (!cio_is_console(sch->schid)) {
+ kfree(sch->lock);
kfree(sch);
+ }
}
extern int css_get_ssd_info(struct subchannel *sch);
@@ -135,14 +137,16 @@ css_register_subchannel(struct subchannel *sch)
sch->dev.parent = &css[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
-
+ sch->dev.groups = subch_attr_groups;
+
/* make it known to the system */
ret = css_sch_device_register(sch);
- if (ret)
+ if (ret) {
printk (KERN_WARNING "%s: could not register %s\n",
__func__, sch->dev.bus_id);
- else
- css_get_ssd_info(sch);
+ return ret;
+ }
+ css_get_ssd_info(sch);
return ret;
}
@@ -177,7 +181,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
- (void *)&schid, check_subchannel);
+ &schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
@@ -201,18 +205,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
unsigned long flags;
enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
disc = device_is_disconnected(sch);
if (disc && slow) {
/* Disconnected devices are evaluated directly only.*/
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return 0;
}
/* No interrupt after machine check - kill pending timers. */
device_kill_pending_timer(sch);
if (!disc && !slow) {
/* Non-disconnected devices are evaluated on the slow path. */
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return -EAGAIN;
}
event = css_get_subchannel_status(sch);
@@ -237,9 +241,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
/* Ask driver what to do with device. */
action = UNREGISTER;
if (sch->driver && sch->driver->notify) {
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
ret = sch->driver->notify(&sch->dev, event);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
if (ret)
action = NONE;
}
@@ -264,17 +268,13 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
case UNREGISTER:
case UNREGISTER_PROBE:
/* Unregister device (will use subchannel lock). */
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
-
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
break;
case REPROBE:
device_trigger_reprobe(sch);
@@ -282,7 +282,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
default:
break;
}
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Probe if necessary. */
+ if (action == UNREGISTER_PROBE)
+ ret = css_probe_device(sch->schid);
return ret;
}
@@ -335,7 +338,7 @@ static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);
static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
{
CIO_TRACE_EVENT(4, "slowpath");
@@ -360,8 +363,7 @@ css_trigger_slow_path(void)
spin_unlock_irq(&slow_subchannel_lock);
}
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
struct workqueue_struct *slow_path_wq;
/* Reprobe subchannel if unregistered. */
@@ -398,7 +400,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
}
/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(void *data)
+static void reprobe_all(struct work_struct *unused)
{
int ret;
@@ -414,7 +416,7 @@ static void reprobe_all(void *data)
need_reprobe);
}
-DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+DECLARE_WORK(css_reprobe_work, reprobe_all);
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
@@ -575,12 +577,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
-static inline void __init
-setup_css(int nr)
+static inline int __init setup_css(int nr)
{
u32 tod_high;
+ int ret;
memset(css[nr], 0, sizeof(struct channel_subsystem));
+ css[nr]->pseudo_subchannel =
+ kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
+ if (!css[nr]->pseudo_subchannel)
+ return -ENOMEM;
+ css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
+ css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
+ sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
+ ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+ if (ret) {
+ kfree(css[nr]->pseudo_subchannel);
+ return ret;
+ }
mutex_init(&css[nr]->mutex);
css[nr]->valid = 1;
css[nr]->cssid = nr;
@@ -588,6 +602,7 @@ setup_css(int nr)
css[nr]->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
css_generate_pgid(css[nr], tod_high);
+ return 0;
}
/*
@@ -624,10 +639,12 @@ init_channel_subsystem (void)
ret = -ENOMEM;
goto out_unregister;
}
- setup_css(i);
- ret = device_register(&css[i]->device);
+ ret = setup_css(i);
if (ret)
goto out_free;
+ ret = device_register(&css[i]->device);
+ if (ret)
+ goto out_free_all;
if (css_characteristics_avail &&
css_chsc_characteristics.secm) {
ret = device_create_file(&css[i]->device,
@@ -635,6 +652,9 @@ init_channel_subsystem (void)
if (ret)
goto out_device;
}
+ ret = device_register(&css[i]->pseudo_subchannel->dev);
+ if (ret)
+ goto out_file;
}
css_init_done = 1;
@@ -642,13 +662,19 @@ init_channel_subsystem (void)
for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
+out_file:
+ device_remove_file(&css[i]->device, &dev_attr_cm_enable);
out_device:
device_unregister(&css[i]->device);
+out_free_all:
+ kfree(css[i]->pseudo_subchannel->lock);
+ kfree(css[i]->pseudo_subchannel);
out_free:
kfree(css[i]);
out_unregister:
while (i > 0) {
i--;
+ device_unregister(&css[i]->pseudo_subchannel->dev);
if (css_characteristics_avail && css_chsc_characteristics.secm)
device_remove_file(&css[i]->device,
&dev_attr_cm_enable);
@@ -660,6 +686,11 @@ out:
return ret;
}
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+ return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
/*
* find a driver for a subchannel. They identify by the subchannel
* type with the exception that the console subchannel driver has its own
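
Several of the css.c hunks (and the device.c hunks further down) adapt to the reworked workqueue API: work functions now receive a struct work_struct * and recover their context via container_of(), and DECLARE_WORK()/INIT_WORK() no longer take a data argument. A generic sketch of the pattern (struct my_private, my_work_fn and my_setup are placeholders, not symbols from this driver):

	struct my_private {
		struct work_struct work;
		int payload;
	};

	static void my_work_fn(struct work_struct *work)
	{
		/* Recover the enclosing object instead of receiving a void *data. */
		struct my_private *priv = container_of(work, struct my_private, work);

		priv->payload++;
	}

	static void my_setup(struct my_private *priv)
	{
		INIT_WORK(&priv->work, my_work_fn);	/* no data pointer anymore */
		schedule_work(&priv->work);
	}
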
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 8aabb4adeb5..3464c5b875c 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -73,12 +73,13 @@ struct senseid {
} __attribute__ ((packed,aligned(4)));
struct ccw_device_private {
+ struct ccw_device *cdev;
+ struct subchannel *sch;
int state; /* device state */
atomic_t onoff;
unsigned long registered;
- __u16 devno; /* device number */
- __u16 sch_no; /* subchannel number */
- __u8 ssid; /* subchannel set id */
+ struct ccw_dev_id dev_id; /* device id */
+ struct subchannel_id schid; /* subchannel number */
__u8 imask; /* lpm mask for SNID/SID/SPGID */
int iretry; /* retry counter SNID/SID/SPGID */
struct {
@@ -95,6 +96,7 @@ struct ccw_device_private {
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int intretry:1; /* retry internal operation */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -158,6 +160,8 @@ struct channel_subsystem {
int cm_enabled;
void *cub_addr1;
void *cub_addr2;
+ /* for orphaned ccw devices */
+ struct subchannel *pseudo_subchannel;
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
@@ -171,7 +175,9 @@ void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
int device_is_online(struct subchannel *);
-void device_set_waiting(struct subchannel *);
+void device_kill_io(struct subchannel *);
+void device_set_intretry(struct subchannel *sch);
+int device_trigger_verify(struct subchannel *sch);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
@@ -183,6 +189,11 @@ void css_clear_subchannel_slow_list(void);
int css_slow_subchannels_exist(void);
extern int need_rescan;
+int sch_is_pseudo_sch(struct subchannel *);
+
extern struct workqueue_struct *slow_path_wq;
extern struct work_struct slow_path_work;
+
+int subchannel_add_files (struct device *);
+extern struct attribute_group *subch_attr_groups[];
#endif
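
The css.h hunk replaces the loose devno/ssid/sch_no fields in ccw_device_private with a struct ccw_dev_id plus a subchannel_id, so device identity comparisons go through one helper. A small sketch of the new lookup style (example_match is an illustrative name; ccw_dev_id_is_equal() is the helper used in the device.c hunks below):

	/* Sketch: build the id from the subchannel and compare in one step. */
	static int example_match(struct ccw_device_private *priv,
				 struct subchannel *sch)
	{
		struct ccw_dev_id wanted = {
			.ssid  = sch->schid.ssid,
			.devno = sch->schib.pmcw.dev,
		};

		return ccw_dev_id_is_equal(&priv->dev_id, &wanted);
	}
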
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 688945662c1..803579053c2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -23,6 +23,7 @@
#include <asm/param.h> /* HZ */
#include "cio.h"
+#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
@@ -234,9 +235,11 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
ssize_t ret = 0;
int chp;
- for (chp = 0; chp < 8; chp++)
- ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
-
+ if (ssd)
+ for (chp = 0; chp < 8; chp++)
+ ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+ else
+ ret += sprintf (buf, "n/a");
ret += sprintf (buf+ret, "\n");
return min((ssize_t)PAGE_SIZE, ret);
}
@@ -294,14 +297,44 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, cdev->online ? "1\n" : "0\n");
}
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+ return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
+ if (test_and_clear_bit(1, &cdev->private->registered))
+ device_unregister(&cdev->dev);
+ put_device(&cdev->dev);
+}
+
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
struct subchannel *sch;
+ unsigned long flags;
/*
* Forced offline in disconnected state means
* 'throw away device'.
*/
+ if (ccw_device_is_orphan(cdev)) {
+ /* Deregister ccw device. */
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (get_device(&cdev->dev)) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_unregister);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ }
+ return;
+ }
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
@@ -462,6 +495,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device *cdev = to_ccwdev(dev);
struct subchannel *sch;
+ if (ccw_device_is_orphan(cdev))
+ return sprintf(buf, "no device\n");
switch (cdev->private->state) {
case DEV_STATE_BOXED:
return sprintf(buf, "boxed\n");
@@ -498,11 +533,10 @@ static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
-static inline int
-subchannel_add_files (struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &subch_attr_group);
-}
+struct attribute_group *subch_attr_groups[] = {
+ &subch_attr_group,
+ NULL,
+};
static struct attribute * ccwdev_attrs[] = {
&dev_attr_devtype.attr,
@@ -532,8 +566,7 @@ device_remove_files(struct device *dev)
/* this is a simple abstraction for device_register that sets the
* correct bus type and adds the bus specific files */
-int
-ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_register(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
int ret;
@@ -552,50 +585,68 @@ ccw_device_register(struct ccw_device *cdev)
}
struct match_data {
- unsigned int devno;
- unsigned int ssid;
+ struct ccw_dev_id dev_id;
struct ccw_device * sibling;
};
static int
match_devno(struct device * dev, void * data)
{
- struct match_data * d = (struct match_data *)data;
+ struct match_data * d = data;
struct ccw_device * cdev;
cdev = to_ccwdev(dev);
if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
- (cdev->private->devno == d->devno) &&
- (cdev->private->ssid == d->ssid) &&
- (cdev != d->sibling)) {
- cdev->private->state = DEV_STATE_NOT_OPER;
+ !ccw_device_is_orphan(cdev) &&
+ ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
+ (cdev != d->sibling))
return 1;
- }
return 0;
}
-static struct ccw_device *
-get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
- struct ccw_device *sibling)
+static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
+ struct ccw_device *sibling)
{
struct device *dev;
struct match_data data;
- data.devno = devno;
- data.ssid = ssid;
+ data.dev_id = *dev_id;
data.sibling = sibling;
dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
return dev ? to_ccwdev(dev) : NULL;
}
-static void
-ccw_device_add_changed(void *data)
+static int match_orphan(struct device *dev, void *data)
+{
+ struct ccw_dev_id *dev_id;
+ struct ccw_device *cdev;
+
+ dev_id = data;
+ cdev = to_ccwdev(dev);
+ return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+static struct ccw_device *
+get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
+ struct ccw_dev_id *dev_id)
{
+ struct device *dev;
+ dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
+ match_orphan);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+
+static void
+ccw_device_add_changed(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
if (device_add(&cdev->dev)) {
put_device(&cdev->dev);
return;
@@ -607,62 +658,21 @@ ccw_device_add_changed(void *data)
}
}
-extern int css_get_ssd_info(struct subchannel *sch);
-
-void
-ccw_device_do_unreg_rereg(void *data)
+void ccw_device_do_unreg_rereg(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
- int need_rename;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- if (cdev->private->devno != sch->schib.pmcw.dev) {
- /*
- * The device number has changed. This is usually only when
- * a device has been detached under VM and then re-appeared
- * on another subchannel because of a different attachment
- * order than before. Ideally, we should should just switch
- * subchannels, but unfortunately, this is not possible with
- * the current implementation.
- * Instead, we search for the old subchannel for this device
- * number and deregister so there are no collisions with the
- * newly registered ccw_device.
- * FIXME: Find another solution so the block layer doesn't
- * get possibly sick...
- */
- struct ccw_device *other_cdev;
-
- need_rename = 1;
- other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
- sch->schid.ssid, cdev);
- if (other_cdev) {
- struct subchannel *other_sch;
-
- other_sch = to_subchannel(other_cdev->dev.parent);
- if (get_device(&other_sch->dev)) {
- stsch(other_sch->schid, &other_sch->schib);
- if (other_sch->schib.pmcw.dnv) {
- other_sch->schib.pmcw.intparm = 0;
- cio_modify(other_sch);
- }
- css_sch_device_unregister(other_sch);
- }
- }
- /* Update ssd info here. */
- css_get_ssd_info(sch);
- cdev->private->devno = sch->schib.pmcw.dev;
- } else
- need_rename = 0;
+
device_remove_files(&cdev->dev);
if (test_and_clear_bit(1, &cdev->private->registered))
device_del(&cdev->dev);
- if (need_rename)
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_add_changed, (void *)cdev);
+ ccw_device_add_changed);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
@@ -676,22 +686,210 @@ ccw_device_release(struct device *dev)
kfree(cdev);
}
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (cdev) {
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (cdev->private)
+ return cdev;
+ }
+ kfree(cdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ cdev->private->cdev = cdev;
+ atomic_set(&cdev->private->onoff, 0);
+ cdev->dev.parent = &sch->dev;
+ cdev->dev.release = ccw_device_release;
+ INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+ /* Do first half of device_register. */
+ device_initialize(&cdev->dev);
+ if (!get_device(&sch->dev)) {
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ cdev = io_subchannel_allocate_dev(sch);
+ if (!IS_ERR(cdev)) {
+ ret = io_subchannel_initialize_dev(sch, cdev);
+ if (ret) {
+ kfree(cdev);
+ cdev = ERR_PTR(ret);
+ }
+ }
+ return cdev;
+}
+
+static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_attach_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = cdev;
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ device_trigger_reprobe(sch);
+ spin_unlock_irq(sch->lock);
+}
+
+static void sch_attach_disconnected_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct subchannel *other_sch;
+ int ret;
+
+ other_sch = to_subchannel(get_device(cdev->dev.parent));
+ ret = device_move(&cdev->dev, &sch->dev);
+ if (ret) {
+ CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
+ "(ret=%d)!\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ put_device(&other_sch->dev);
+ return;
+ }
+ other_sch->dev.driver_data = NULL;
+ /* No need to keep a subchannel without ccw device around. */
+ css_sch_device_unregister(other_sch);
+ put_device(&other_sch->dev);
+ sch_attach_device(sch, cdev);
+}
+
+static void sch_attach_orphaned_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ int ret;
+
+ /* Try to move the ccw device to its new subchannel. */
+ ret = device_move(&cdev->dev, &sch->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
+ "failed (ret=%d)!\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ return;
+ }
+ sch_attach_device(sch, cdev);
+}
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ /* Need to allocate a new ccw device. */
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ /* OK, we did everything we could... */
+ css_sch_device_unregister(sch);
+ return;
+ }
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = cdev;
+ spin_unlock_irq(sch->lock);
+ /* Start recognition for the new ccw device. */
+ if (io_subchannel_recog(cdev, sch)) {
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = NULL;
+ spin_unlock_irq(sch->lock);
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ css_sch_device_unregister(sch);
+ }
+}
+
+
+void ccw_device_move_to_orphanage(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct ccw_device *replacing_cdev;
+ struct subchannel *sch;
+ int ret;
+ struct channel_subsystem *css;
+ struct ccw_dev_id dev_id;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ css = to_css(sch->dev.parent);
+ dev_id.devno = sch->schib.pmcw.dev;
+ dev_id.ssid = sch->schid.ssid;
+
+ /*
+ * Move the orphaned ccw device to the orphanage so the replacing
+ * ccw device can take its place on the subchannel.
+ */
+ ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
+ "(ret=%d)!\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ return;
+ }
+ cdev->ccwlock = css->pseudo_subchannel->lock;
+ /*
+ * Search for the replacing ccw device
+ * - among the disconnected devices
+ * - in the orphanage
+ */
+ replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
+ if (replacing_cdev) {
+ sch_attach_disconnected_device(sch, replacing_cdev);
+ return;
+ }
+ replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
+ if (replacing_cdev) {
+ sch_attach_orphaned_device(sch, replacing_cdev);
+ return;
+ }
+ sch_create_and_recog_new_device(sch);
+}
+
/*
* Register recognized device.
*/
static void
-io_subchannel_register(void *data)
+io_subchannel_register(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
unsigned long flags;
- cdev = (struct ccw_device *) data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
+ /*
+ * io_subchannel_register() will also be called after device
+ * recognition has been done for a boxed device (which will already
+ * be registered). We need to reprobe since we may now have sense id
+ * information.
+ */
if (klist_node_attached(&cdev->dev.knode_parent)) {
- bus_rescan_devices(&ccw_bus_type);
+ if (!cdev->drv) {
+ ret = device_reprobe(&cdev->dev);
+ if (ret)
+ /* We can't do much here. */
+ dev_info(&cdev->dev, "device_reprobe() returned"
+ " %d\n", ret);
+ }
goto out;
}
/* make it known to the system */
@@ -700,9 +898,9 @@ io_subchannel_register(void *data)
printk (KERN_WARNING "%s: could not register %s\n",
__func__, cdev->dev.bus_id);
put_device(&cdev->dev);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
kfree (cdev->private);
kfree (cdev);
put_device(&sch->dev);
@@ -710,11 +908,6 @@ io_subchannel_register(void *data)
wake_up(&ccw_device_init_wq);
return;
}
-
- ret = subchannel_add_files(cdev->dev.parent);
- if (ret)
- printk(KERN_WARNING "%s: could not add attributes to %s\n",
- __func__, sch->dev.bus_id);
put_device(&cdev->dev);
out:
cdev->private->flags.recog_done = 1;
@@ -725,11 +918,14 @@ out:
}
void
-ccw_device_call_sch_unregister(void *data)
+ccw_device_call_sch_unregister(struct work_struct *work)
{
- struct ccw_device *cdev = data;
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
@@ -759,7 +955,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
break;
sch = to_subchannel(cdev->dev.parent);
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, (void *) cdev);
+ ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -774,7 +970,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
if (!get_device(&cdev->dev))
break;
PREPARE_WORK(&cdev->private->kick_work,
- io_subchannel_register, (void *) cdev);
+ io_subchannel_register);
queue_work(slow_path_wq, &cdev->private->kick_work);
break;
}
@@ -788,13 +984,13 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
sch->dev.driver_data = cdev;
sch->driver = &io_subchannel_driver;
- cdev->ccwlock = &sch->lock;
+ cdev->ccwlock = sch->lock;
/* Init private data. */
priv = cdev->private;
- priv->devno = sch->schib.pmcw.dev;
- priv->ssid = sch->schid.ssid;
- priv->sch_no = sch->schid.sch_no;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+ priv->schid = sch->schid;
priv->state = DEV_STATE_NOT_OPER;
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
@@ -808,9 +1004,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
rc = ccw_device_recognition(cdev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
if (rc) {
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -818,12 +1014,55 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
return rc;
}
+static void ccw_device_move_to_sch(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ int rc;
+ struct subchannel *sch;
+ struct ccw_device *cdev;
+ struct subchannel *former_parent;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ sch = priv->sch;
+ cdev = priv->cdev;
+ former_parent = ccw_device_is_orphan(cdev) ?
+ NULL : to_subchannel(get_device(cdev->dev.parent));
+ mutex_lock(&sch->reg_mutex);
+ /* Try to move the ccw device to its new subchannel. */
+ rc = device_move(&cdev->dev, &sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+ if (rc) {
+ CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
+ "0.%x.%04x failed (ret=%d)!\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no, rc);
+ css_sch_device_unregister(sch);
+ goto out;
+ }
+ if (former_parent) {
+ spin_lock_irq(former_parent->lock);
+ former_parent->dev.driver_data = NULL;
+ spin_unlock_irq(former_parent->lock);
+ css_sch_device_unregister(former_parent);
+ /* Reset intparm to zeroes. */
+ former_parent->schib.pmcw.intparm = 0;
+ cio_modify(former_parent);
+ }
+ sch_attach_device(sch, cdev);
+out:
+ if (former_parent)
+ put_device(&former_parent->dev);
+ put_device(&cdev->dev);
+}
+
static int
io_subchannel_probe (struct subchannel *sch)
{
struct ccw_device *cdev;
int rc;
unsigned long flags;
+ struct ccw_dev_id dev_id;
if (sch->dev.driver_data) {
/*
@@ -834,7 +1073,6 @@ io_subchannel_probe (struct subchannel *sch)
cdev = sch->dev.driver_data;
device_initialize(&cdev->dev);
ccw_device_register(cdev);
- subchannel_add_files(&sch->dev);
/*
* Check if the device is already online. If it is
* the reference count needs to be corrected
@@ -847,33 +1085,37 @@ io_subchannel_probe (struct subchannel *sch)
get_device(&cdev->dev);
return 0;
}
- cdev = kzalloc (sizeof(*cdev), GFP_KERNEL);
+ /*
+ * First check if a fitting device may be found amongst the
+ * disconnected devices or in the orphanage.
+ */
+ dev_id.devno = sch->schib.pmcw.dev;
+ dev_id.ssid = sch->schid.ssid;
+ cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
if (!cdev)
- return -ENOMEM;
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (!cdev->private) {
- kfree(cdev);
- return -ENOMEM;
- }
- atomic_set(&cdev->private->onoff, 0);
- cdev->dev.parent = &sch->dev;
- cdev->dev.release = ccw_device_release;
- INIT_LIST_HEAD(&cdev->private->kick_work.entry);
- /* Do first half of device_register. */
- device_initialize(&cdev->dev);
-
- if (!get_device(&sch->dev)) {
- if (cdev->dev.release)
- cdev->dev.release(&cdev->dev);
- return -ENODEV;
+ cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
+ &dev_id);
+ if (cdev) {
+ /*
+ * Schedule moving the device until we have a registered
+ * subchannel to move to, and let the probe succeed. We can
+ * unregister later again, when the probe is through.
+ */
+ cdev->private->sch = sch;
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_move_to_sch);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+ return 0;
}
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev))
+ return PTR_ERR(cdev);
rc = io_subchannel_recog(cdev, sch);
if (rc) {
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
if (cdev->dev.release)
cdev->dev.release(&cdev->dev);
}
@@ -881,17 +1123,6 @@ io_subchannel_probe (struct subchannel *sch)
return rc;
}
-static void
-ccw_device_unregister(void *data)
-{
- struct ccw_device *cdev;
-
- cdev = (struct ccw_device *)data;
- if (test_and_clear_bit(1, &cdev->private->registered))
- device_unregister(&cdev->dev);
- put_device(&cdev->dev);
-}
-
static int
io_subchannel_remove (struct subchannel *sch)
{
@@ -912,7 +1143,7 @@ io_subchannel_remove (struct subchannel *sch)
*/
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_unregister, (void *) cdev);
+ ccw_device_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
return 0;
@@ -951,6 +1182,9 @@ io_subchannel_ioterm(struct device *dev)
cdev = dev->driver_data;
if (!cdev)
return;
+ /* Internal I/O will be retried by the interrupt handler. */
+ if (cdev->private->flags.intretry)
+ return;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -991,6 +1225,13 @@ static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;
+static DEFINE_SPINLOCK(ccw_console_lock);
+
+spinlock_t * cio_get_console_lock(void)
+{
+ return &ccw_console_lock;
+}
+
static int
ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
{
@@ -1036,6 +1277,7 @@ ccw_device_probe_console(void)
memset(&console_cdev, 0, sizeof(struct ccw_device));
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
+ console_private.cdev = &console_cdev;
ret = ccw_device_console_enable(&console_cdev, sch);
if (ret) {
cio_release_console();
@@ -1055,7 +1297,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
{
char *bus_id;
- bus_id = (char *)id;
+ bus_id = id;
return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
}
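
The device.c changes introduce the orphanage: a ccw device whose device number reappears on a different subchannel is re-parented with device_move(), either onto the new subchannel or onto the per-css pseudo subchannel, instead of being unregistered and re-registered. A reduced sketch of the re-parenting step, with locking and failure logging stripped (example_reparent is illustrative):

	/* Sketch only: move the ccw device below a new subchannel and adopt
	 * that subchannel's lock and id, as sch_attach_device() does above. */
	static int example_reparent(struct ccw_device *cdev, struct subchannel *sch)
	{
		int ret;

		ret = device_move(&cdev->dev, &sch->dev);
		if (ret)
			return ret;		/* caller decides how loud to complain */
		cdev->ccwlock = sch->lock;
		cdev->private->schid = sch->schid;
		return 0;
	}
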
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 00be9a5b4ac..29db6341d63 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
/* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
- DEV_STATE_WAIT4IO,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
DEV_STATE_DISCONNECTED,
@@ -79,9 +78,10 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-int ccw_device_register(struct ccw_device *);
-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);
+void ccw_device_move_to_orphanage(struct work_struct *);
+int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index dace46fc32e..eed14572fc3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,16 +59,25 @@ device_set_disconnected(struct subchannel *sch)
cdev->private->state = DEV_STATE_DISCONNECTED;
}
-void
-device_set_waiting(struct subchannel *sch)
+void device_set_intretry(struct subchannel *sch)
{
struct ccw_device *cdev;
- if (!sch->dev.driver_data)
+ cdev = sch->dev.driver_data;
+ if (!cdev)
return;
+ cdev->private->flags.intretry = 1;
+}
+
+int device_trigger_verify(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
cdev = sch->dev.driver_data;
- ccw_device_set_timeout(cdev, 10*HZ);
- cdev->private->state = DEV_STATE_WAIT4IO;
+ if (!cdev || !cdev->online)
+ return -EINVAL;
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return 0;
}
/*
@@ -177,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev)
/*
* Check if cu type and device type still match. If
* not, it is certainly another device and we have to
- * de- and re-register. Also check here for non-matching devno.
+ * de- and re-register.
*/
if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
cdev->id.cu_model != cdev->private->senseid.cu_model ||
cdev->id.dev_type != cdev->private->senseid.dev_type ||
- cdev->id.dev_model != cdev->private->senseid.dev_model ||
- cdev->private->devno != sch->schib.pmcw.dev) {
+ cdev->id.dev_model != cdev->private->senseid.dev_model) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unreg_rereg, (void *)cdev);
+ ccw_device_do_unreg_rereg);
queue_work(ccw_device_work, &cdev->private->kick_work);
return 0;
}
@@ -255,7 +263,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : unknown device %04x on subchannel "
- "0.%x.%04x\n", cdev->private->devno,
+ "0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
@@ -282,14 +290,15 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
"%04X/%02X\n",
- cdev->private->ssid, cdev->private->devno,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : boxed device %04x on subchannel "
- "0.%x.%04x\n", cdev->private->devno,
+ "0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
}
@@ -319,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
ret = (sch->driver && sch->driver->notify) ?
sch->driver->notify(&sch->dev, CIO_OPER) : 0;
if (!ret)
/* Driver doesn't want device back. */
- ccw_device_do_unreg_rereg((void *)cdev);
+ ccw_device_do_unreg_rereg(work);
else {
/* Reenable channel measurements, if needed. */
cmf_reenable(cdev);
@@ -349,6 +360,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
sch = to_subchannel(cdev->dev.parent);
+ ccw_device_set_timeout(cdev, 0);
+
if (state != DEV_STATE_ONLINE)
cio_disable_subchannel(sch);
@@ -361,12 +374,11 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
"Boxed device %04x on subchannel %04x\n",
- cdev->private->devno, sch->schid.sch_no);
+ cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
- (void *)cdev);
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -410,7 +422,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
/* PGID mismatch, can't pathgroup. */
CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
"0.%x.%04x, can't pathgroup\n",
- cdev->private->ssid, cdev->private->devno);
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
cdev->private->options.pgroup = 0;
return;
}
@@ -515,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/* Extra sanity. */
if (sch->lpm)
@@ -534,8 +549,7 @@ ccw_device_nopath_notify(void *data)
cio_disable_subchannel(sch);
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister,
- (void *)cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work,
&cdev->private->kick_work);
} else
@@ -586,11 +600,15 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
}
break;
case -ETIME:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
default:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
@@ -657,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev)
{
struct subchannel *sch;
+ if (ccw_device_is_orphan(cdev)) {
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
sch = to_subchannel(cdev->dev.parent);
if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
return -ENODEV;
@@ -721,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, (void *)cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -752,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
}
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, (void *)cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -857,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -883,7 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
- printk("Huh? %s(%s): unsolicited interrupt...\n",
+ printk(KERN_INFO "Huh? %s(%s): unsolicited "
+ "interrupt...\n",
__FUNCTION__, cdev->dev.bus_id);
if (cdev->handler)
cdev->handler (cdev, 0, irb);
@@ -896,6 +919,12 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
* had killed the original request.
*/
if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ /* Retry Basic Sense if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ ccw_device_do_sense(cdev, irb);
+ return;
+ }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
@@ -942,10 +971,10 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-ETIMEDOUT));
+ ERR_PTR(-EIO));
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
} else if (cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -968,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -979,51 +1008,15 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-ETIMEDOUT));
-}
-
-static void
-ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
- struct subchannel *sch;
-
- irb = (struct irb *) __LC_IRB;
- /*
- * Accumulate status and find out if a basic sense is needed.
- * This is fine since we have already adapted the lpm.
- */
- ccw_device_accumulate_irb(cdev, irb);
- if (cdev->private->flags.dosense) {
- if (ccw_device_do_sense(cdev, irb) == 0) {
- cdev->private->state = DEV_STATE_W4SENSE;
- }
- return;
- }
-
- /* Iff device is idle, reset timeout. */
- sch = to_subchannel(cdev->dev.parent);
- if (!stsch(sch->schid, &sch->schib))
- if (sch->schib.scsw.actl == 0)
- ccw_device_set_timeout(cdev, 0);
- /* Call the handler. */
- ccw_device_call_handler(cdev);
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else if (cdev->private->flags.doverify)
- ccw_device_online_verify(cdev, 0);
+ ERR_PTR(-EIO));
}
-static void
-ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+void device_kill_io(struct subchannel *sch)
{
int ret;
- struct subchannel *sch;
+ struct ccw_device *cdev;
- sch = to_subchannel(cdev->dev.parent);
- ccw_device_set_timeout(cdev, 0);
+ cdev = sch->dev.driver_data;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
@@ -1033,7 +1026,7 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
if (ret == -ENODEV) {
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -1042,12 +1035,12 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
}
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-ETIMEDOUT));
+ ERR_PTR(-EIO));
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, (void *)cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else if (cdev->private->flags.doverify)
+ } else
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
}
@@ -1116,7 +1109,8 @@ device_trigger_reprobe(struct subchannel *sch)
/* Update some values. */
if (stsch(sch->schid, &sch->schib))
return;
-
+ if (!sch->schib.pmcw.dnv)
+ return;
/*
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
@@ -1130,7 +1124,13 @@ device_trigger_reprobe(struct subchannel *sch)
sch->schib.pmcw.mp = 1;
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
/* We should also update ssd info, but this has to wait. */
- ccw_device_start_id(cdev, 0);
+ /* Check if this is another device which appeared on the same sch. */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_move_to_orphanage);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ } else
+ ccw_device_start_id(cdev, 0);
}
static void
@@ -1284,12 +1284,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
},
- [DEV_STATE_WAIT4IO] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
- [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
- },
[DEV_STATE_QUIESCE] = {
[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
[DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
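
With DEV_STATE_WAIT4IO gone, killed internal I/O (Sense ID, Sense/Set PGID, basic sense) is redriven from the interrupt handler through the new intretry flag rather than through a separate wait state. A hedged sketch of the check the status routines now perform (example_check_internal_io is illustrative; the flag and fctl test follow the hunks above and below):

	/* Sketch: if halt/clear killed the internal request and a retry was
	 * flagged, report -EAGAIN so the caller restarts it; otherwise fail
	 * with -ETIME as before. */
	static int example_check_internal_io(struct ccw_device *cdev, struct irb *irb)
	{
		if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
			if (cdev->private->flags.intretry) {
				cdev->private->flags.intretry = 0;
				return -EAGAIN;
			}
			return -ETIME;
		}
		return 0;
	}
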
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 438db483035..f17275917fe 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -42,18 +42,15 @@ diag210(struct diag210 * addr)
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
- asm volatile (
- " lhi %0,-1\n"
- " sam31\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1: sam64\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous"
- : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
+ asm volatile(
+ " lhi %0,-1\n"
+ " sam31\n"
+ " diag %1,0,0x210\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1: sam64\n"
+ EX_TABLE(0b,1b)
+ : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory");
*addr = diag210_tmp;
spin_unlock_irqrestore(&diag210_lock, flags);
@@ -66,17 +63,14 @@ diag210(struct diag210 * addr)
{
int ccode;
- asm volatile (
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ asm volatile(
+ " lhi %0,-1\n"
+ " diag %1,0,0x210\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
"1:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous"
- : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
+ EX_TABLE(0b,1b)
+ : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory");
return ccode;
}
@@ -197,6 +191,8 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -243,8 +239,14 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
return 0; /* Success */
}
/* Check the error cases. */
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense ID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
/*
* if the device doesn't support the SenseID
@@ -257,7 +259,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
*/
CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
"0.%x.%04x reports cmd reject\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no);
return -EOPNOTSUPP;
}
@@ -265,7 +267,8 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->ssid, cdev->private->devno,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -280,14 +283,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
"on subchannel 0.%x.%04x is "
"'not operational'\n", sch->orb.lpm,
- cdev->private->devno, sch->schid.ssid,
- sch->schid.sch_no);
+ cdev->private->dev_id.devno,
+ sch->schid.ssid, sch->schid.sch_no);
return -EACCES;
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
"subchannel 0.%x.%04x returns status %02X%02X\n",
- cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
return -EAGAIN;
}
@@ -336,7 +340,7 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
if (MACHINE_IS_VM) {
- VM_virtual_device_info (cdev->private->devno,
+ VM_virtual_device_info (cdev->private->dev_id.devno,
&cdev->private->senseid);
if (cdev->private->senseid.cu_type != 0xFFFF) {
/* Got the device information from VM. */
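
The device_id.c hunks only touch the inline assembly, replacing the hand-rolled __ex_table sections with the EX_TABLE() macro. For reference, a condensed restatement of the 31-bit diag210() above in the new style (kept self-contained for illustration; it is not an additional function in the driver):

	/* EX_TABLE(0b,1b): if the instruction at label 0 faults, resume at
	 * label 1, leaving the preloaded -1 in ccode as the error result. */
	static inline int example_diag210(struct diag210 *addr)
	{
		int ccode;

		asm volatile(
			"	lhi	%0,-1\n"
			"	diag	%1,0,0x210\n"
			"0:	ipm	%0\n"
			"	srl	%0,28\n"
			"1:\n"
			EX_TABLE(0b,1b)
			: "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory");
		return ccode;
	}
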
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 93a897eebff..d269607336e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -50,7 +50,6 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
- cdev->private->state != DEV_STATE_WAIT4IO &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
@@ -155,7 +154,6 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
- cdev->private->state != DEV_STATE_WAIT4IO &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
@@ -216,6 +214,9 @@ ccw_device_call_handler(struct ccw_device *cdev)
(stctl & SCSW_STCTL_PRIM_STATUS)))
return 0;
+ /* Clear pending timers for device driver initiated I/O. */
+ if (ending_status)
+ ccw_device_set_timeout(cdev, 0);
/*
* Now we are ready to call the device driver interrupt handler.
*/
@@ -285,10 +286,10 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
if (cdev->private->flags.doverify ||
cdev->private->state == DEV_STATE_VERIFY)
cdev->private->intparm = -EAGAIN;
- if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
- !(irb->ecw[0] &
- (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
- cdev->private->intparm = -EAGAIN;
+ else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+ !(irb->ecw[0] &
+ (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
+ cdev->private->intparm = -EAGAIN;
else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
(irb->scsw.dstat & DEV_STAT_DEV_END) &&
(irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
@@ -309,12 +310,15 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
sch = to_subchannel(cdev->dev.parent);
do {
+ ccw_device_set_timeout(cdev, 60 * HZ);
ret = cio_start (sch, ccw, lpm);
+ if (ret != 0)
+ ccw_device_set_timeout(cdev, 0);
if (ret == -EBUSY) {
/* Try again later. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
msleep(10);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
continue;
}
if (ret != 0)
@@ -322,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
break;
/* Wait for end of request. */
cdev->private->intparm = magic;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q,
(cdev->private->intparm == -EIO) ||
(cdev->private->intparm == -EAGAIN) ||
(cdev->private->intparm == 0));
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Check at least for channel end / device end */
if (cdev->private->intparm == -EIO) {
/* Non-retryable error. */
@@ -338,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
/* Success. */
break;
/* Try again later. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
msleep(10);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
} while (1);
return ret;
@@ -385,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
return ret;
}
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
@@ -402,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
/* Restore interrupt handler. */
cdev->handler = handler;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
clear_normalized_cda (rdc_ccw);
kfree(rdc_ccw);
@@ -459,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
rcd_ccw->count = ciw->count;
rcd_ccw->flags = CCW_FLAG_SLI;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
@@ -476,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
/* Restore interrupt handler. */
cdev->handler = handler;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
/*
* on success we update the user input parms
@@ -533,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
kfree(buf);
return -ENOMEM;
}
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
ret = cio_enable_subchannel(sch, 3);
if (ret)
goto out_unlock;
@@ -555,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
goto out_unlock;
}
cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -568,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
out_unlock:
kfree(buf);
kfree(buf2);
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
@@ -586,13 +590,13 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
- return cdev->private->sch_no;
+ return cdev->private->schid.sch_no;
}
int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
- return cdev->private->devno;
+ return cdev->private->dev_id.devno;
}
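For readability, this is how the body of __ccw_device_retry_loop() reads once the hunks above are applied (abridged): each cio_start() is guarded by a 60-second device timeout that is cancelled immediately if the start fails, and the subchannel lock is now taken through the sch->lock pointer rather than by the address of an embedded spinlock.

	do {
		ccw_device_set_timeout(cdev, 60 * HZ);	/* arm: expire if no interrupt ever arrives */
		ret = cio_start(sch, ccw, lpm);
		if (ret != 0)
			ccw_device_set_timeout(cdev, 0);	/* start failed: disarm right away */
		if (ret == -EBUSY) {
			/* Try again later. */
			spin_unlock_irq(sch->lock);		/* lock is now a pointer member */
			msleep(10);
			spin_lock_irq(sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* ... wait for completion, retry on -EAGAIN ... */
	} while (1);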
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 8ca2d078848..cb1879a9681 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -71,6 +71,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -79,7 +81,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not "
"operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno,
+ sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
}
@@ -96,6 +99,9 @@ ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
int ret;
+ /* Set a timeout of 60s */
+ ccw_device_set_timeout(cdev, 60*HZ);
+
cdev->private->state = DEV_STATE_SENSE_PGID;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
@@ -118,8 +124,14 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons &&
(irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
/*
@@ -132,7 +144,8 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->ssid, cdev->private->devno,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -144,7 +157,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, sch->orb.lpm);
return -EACCES;
}
@@ -152,7 +165,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
"is reserved by someone else\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no);
return -EUSERS;
}
@@ -248,6 +261,8 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -258,7 +273,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
/* PGID command failed on this path. */
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return ret;
}
@@ -288,6 +303,8 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -298,7 +315,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
/* nop command failed on this path. */
CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return ret;
}
@@ -316,8 +333,14 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Set PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
@@ -325,8 +348,9 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
"cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->ssid,
- cdev->private->devno, irb->esw.esw0.erw.scnt,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
@@ -336,7 +360,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
@@ -354,12 +378,18 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry NOP if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->schid.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
@@ -480,6 +510,8 @@ ccw_device_verify_start(struct ccw_device *cdev)
ccw_device_verify_done(cdev, -ENODEV);
return;
}
+ /* After 60s path verification is considered to have failed. */
+ ccw_device_set_timeout(cdev, 60*HZ);
__ccw_device_verify_start(cdev);
}
@@ -554,6 +586,9 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
void
ccw_device_disband_start(struct ccw_device *cdev)
{
+ /* After 60s disbanding is considered to have failed. */
+ ccw_device_set_timeout(cdev, 60*HZ);
+
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
cdev->private->imask = 0x80;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index caf148d5caa..bdcf930f7be 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -32,19 +32,18 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)))
return;
-
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
" ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
- cdev->private->devno, cdev->private->ssid,
- cdev->private->sch_no,
+ cdev->private->dev_id.devno, cdev->private->schid.ssid,
+ cdev->private->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
if (irb->scsw.cc != 3) {
char dbf_text[15];
- sprintf(dbf_text, "chk%x", cdev->private->sch_no);
+ sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof (struct irb));
}
@@ -320,6 +319,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch->sense_ccw.count = SENSE_MAX_COUNT;
sch->sense_ccw.flags = CCW_FLAG_SLI;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
+
return cio_start (sch, &sch->sense_ccw, 0xff);
}
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 95a9462f9a9..ad6d8294006 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -25,106 +25,74 @@ struct tpi_info {
static inline int stsch(struct subchannel_id schid,
volatile struct schib *addr)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " stsch 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid), "a" (addr), "m" (*addr)
- : "cc", "1" );
+ asm volatile(
+ " stsch 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int stsch_err(struct subchannel_id schid,
volatile struct schib *addr)
{
- int ccode;
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
- __asm__ __volatile__(
- " lhi %0,%3\n"
- " lr 1,%1\n"
- " stsch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ asm volatile(
+ " stsch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
"1:\n"
-#ifdef CONFIG_64BIT
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous"
-#else
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous"
-#endif
- : "=&d" (ccode)
- : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
- : "cc", "1" );
+ EX_TABLE(0b,1b)
+ : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int msch(struct subchannel_id schid,
volatile struct schib *addr)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " msch 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid), "a" (addr), "m" (*addr)
- : "cc", "1" );
+ asm volatile(
+ " msch 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int msch_err(struct subchannel_id schid,
volatile struct schib *addr)
{
- int ccode;
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
- __asm__ __volatile__(
- " lhi %0,%3\n"
- " lr 1,%1\n"
- " msch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ asm volatile(
+ " msch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
"1:\n"
-#ifdef CONFIG_64BIT
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b,1b\n"
- ".previous"
-#else
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,1b\n"
- ".previous"
-#endif
- : "=&d" (ccode)
- : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
- : "cc", "1" );
+ EX_TABLE(0b,1b)
+ : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int tsch(struct subchannel_id schid,
volatile struct irb *addr)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " tsch 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid), "a" (addr), "m" (*addr)
- : "cc", "1" );
+ asm volatile(
+ " tsch 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
@@ -132,89 +100,77 @@ static inline int tpi( volatile struct tpi_info *addr)
{
int ccode;
- __asm__ __volatile__(
- " tpi 0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "a" (addr), "m" (*addr)
- : "cc", "1" );
+ asm volatile(
+ " tpi 0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int ssch(struct subchannel_id schid,
volatile struct orb *addr)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " ssch 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid), "a" (addr), "m" (*addr)
- : "cc", "1" );
+ asm volatile(
+ " ssch 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}
static inline int rsch(struct subchannel_id schid)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " rsch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid)
- : "cc", "1" );
+ asm volatile(
+ " rsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
return ccode;
}
static inline int csch(struct subchannel_id schid)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid)
- : "cc", "1" );
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
return ccode;
}
static inline int hsch(struct subchannel_id schid)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " hsch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid)
- : "cc", "1" );
+ asm volatile(
+ " hsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
return ccode;
}
static inline int xsch(struct subchannel_id schid)
{
+ register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " .insn rre,0xb2760000,%1,0\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (schid)
- : "cc", "1" );
+ asm volatile(
+ " .insn rre,0xb2760000,%1,0\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
return ccode;
}
@@ -223,41 +179,27 @@ static inline int chsc(void *chsc_area)
typedef struct { char _[4096]; } addr_type;
int cc;
- __asm__ __volatile__ (
- ".insn rre,0xb25f0000,%2,0 \n\t"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
+ asm volatile(
+ " .insn rre,0xb25f0000,%2,0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
: "=d" (cc), "=m" (*(addr_type *) chsc_area)
: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
- : "cc" );
-
+ : "cc");
return cc;
}
-static inline int iac( void)
-{
- int ccode;
-
- __asm__ __volatile__(
- " iac 1\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode) : : "cc", "1" );
- return ccode;
-}
-
static inline int rchp(int chpid)
{
+ register unsigned int reg1 asm ("1") = chpid;
int ccode;
- __asm__ __volatile__(
- " lr 1,%1\n"
- " rchp\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (chpid)
- : "cc", "1" );
+ asm volatile(
+ " lr 1,%1\n"
+ " rchp\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
return ccode;
}
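Most wrappers in ioasm.h are rewritten to the same recipe: the subchannel id is bound to hardware register 1 through a register variable instead of an explicit "lr 1,%1", and the open-coded, CONFIG_64BIT-dependent __ex_table fragments give way to the EX_TABLE() helper. Consolidated, stsch_err() now reads (comments added):

static inline int stsch_err(struct subchannel_id schid, volatile struct schib *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;	/* STSCH expects the schid in r1 */
	int ccode = -EIO;					/* returned if the instruction faults */

	asm volatile(
		"	stsch	0(%2)\n"
		"0:	ipm	%0\n"		/* copy the condition code ... */
		"	srl	%0,28\n"	/* ... into ccode */
		"1:\n"
		EX_TABLE(0b,1b)			/* on a fault, resume at 1: with ccode still -EIO */
		: "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
	return ccode;
}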
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index cde822d8b5c..9d4ea449a60 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -46,6 +46,7 @@
#include <asm/timex.h>
#include <asm/debug.h>
+#include <asm/s390_rdev.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -65,12 +66,12 @@ MODULE_LICENSE("GPL");
/******************** HERE WE GO ***********************************/
static const char version[] = "QDIO base support version 2";
+extern struct bus_type ccw_bus_type;
-#ifdef QDIO_PERFORMANCE_STATS
+static int qdio_performance_stats = 0;
static int proc_perf_file_registration;
static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
static struct qdio_perf_stats perf_stats;
-#endif /* QDIO_PERFORMANCE_STATS */
static int hydra_thinints;
static int is_passthrough = 0;
@@ -275,9 +276,8 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
QDIO_DBF_TEXT4(0,trace,"sigasync");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_syncs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_syncs++;
cc = do_siga_sync(q->schid, gpr2, gpr3);
if (cc)
@@ -322,9 +322,8 @@ qdio_siga_output(struct qdio_q *q)
__u32 busy_bit;
__u64 start_time=0;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_outs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_outs++;
QDIO_DBF_TEXT4(0,trace,"sigaout");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,9 +357,8 @@ qdio_siga_input(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"sigain");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_ins++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_ins++;
cc = do_siga_input(q->schid, q->mask);
@@ -481,7 +479,7 @@ qdio_stop_polling(struct qdio_q *q)
unsigned char state = 0;
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
- if (!atomic_swap(&q->polling,0))
+ if (!atomic_xchg(&q->polling,0))
return 1;
QDIO_DBF_TEXT4(0,trace,"stoppoll");
@@ -954,9 +952,8 @@ __qdio_outbound_processing(struct qdio_q *q)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- o_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ o_p_c++;
/* as we're sissies, we'll check next time */
if (likely(!atomic_read(&q->is_in_shutdown))) {
qdio_mark_q(q);
@@ -964,10 +961,10 @@ __qdio_outbound_processing(struct qdio_q *q)
}
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- o_p_nc++;
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ o_p_nc++;
+ perf_stats.tl_runs++;
+ }
/* see comment in qdio_kick_outbound_q */
siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -1142,15 +1139,16 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
{
int i;
-#ifdef QDIO_PERFORMANCE_STATS
static int old_pcis=0;
static int old_thinints=0;
- if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
- perf_stats.start_time_inbound=NOW;
- else
- old_pcis=perf_stats.pcis;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ if ((old_pcis==perf_stats.pcis)&&
+ (old_thinints==perf_stats.thinints))
+ perf_stats.start_time_inbound=NOW;
+ else
+ old_pcis=perf_stats.pcis;
+ }
i=qdio_get_inbound_buffer_frontier(q);
if ( (i!=GET_SAVED_FRONTIER(q)) ||
@@ -1340,10 +1338,10 @@ qdio_kick_inbound_handler(struct qdio_q *q)
q->siga_error=0;
q->error_status_flags=0;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
- perf_stats.inbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
+ perf_stats.inbound_cnt++;
+ }
}
static inline void
@@ -1363,9 +1361,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
*/
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_c++;
/*
* as we might just be about to stop polling, we make
* sure that we check again at least once more
@@ -1373,9 +1370,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
tiqdio_sched_tl();
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_nc++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_nc++;
if (unlikely(atomic_read(&q->is_in_shutdown))) {
qdio_unmark_q(q);
goto out;
@@ -1416,11 +1412,11 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
irq_ptr = (struct qdio_irq*)q->irq_ptr;
for (i=0;i<irq_ptr->no_output_qs;i++) {
oq = irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
- if (!qdio_is_outbound_q_done(oq))
+ if (!qdio_is_outbound_q_done(oq)) {
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
__qdio_outbound_processing(oq);
+ }
}
}
@@ -1457,9 +1453,8 @@ __qdio_inbound_processing(struct qdio_q *q)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- i_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ i_p_c++;
/* as we're sissies, we'll check next time */
if (likely(!atomic_read(&q->is_in_shutdown))) {
qdio_mark_q(q);
@@ -1467,10 +1462,10 @@ __qdio_inbound_processing(struct qdio_q *q)
}
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- i_p_nc++;
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ i_p_nc++;
+ perf_stats.tl_runs++;
+ }
again:
if (qdio_has_inbound_q_moved(q)) {
@@ -1516,9 +1511,8 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_c++;
/*
* as we might just be about to stop polling, we make
* sure that we check again at least once more
@@ -1609,9 +1603,8 @@ tiqdio_tl(unsigned long data)
{
QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.tl_runs++;
tiqdio_inbound_checks();
}
@@ -1741,7 +1734,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
void *ptr;
int available;
- sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
+ sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
for (i=0;i<no_input_qs;i++) {
q=irq_ptr->input_qs[i];
@@ -1918,10 +1911,10 @@ tiqdio_thinint_handler(void)
{
QDIO_DBF_TEXT4(0,trace,"thin_int");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.thinints++;
- perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.thinints++;
+ perf_stats.start_time_inbound=NOW;
+ }
/* SVS only when needed:
* issue SVS to benefit from iqdio interrupt avoidance
@@ -1964,8 +1957,8 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
QDIO_PRINT_WARN("sense data available on qdio channel.\n");
- HEXDUMP16(WARN,"irb: ",irb);
- HEXDUMP16(WARN,"sense data: ",irb->ecw);
+ QDIO_HEXDUMP16(WARN,"irb: ",irb);
+ QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
}
}
@@ -1976,18 +1969,17 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
int i;
struct qdio_q *q;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.pcis++;
- perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.pcis++;
+ perf_stats.start_time_inbound=NOW;
+ }
for (i=0;i<irq_ptr->no_input_qs;i++) {
q=irq_ptr->input_qs[i];
if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
qdio_mark_q(q);
else {
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
__qdio_inbound_processing(q);
}
}
@@ -1995,11 +1987,10 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
return;
for (i=0;i<irq_ptr->no_output_qs;i++) {
q=irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
if (qdio_is_outbound_q_done(q))
continue;
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
if (!irq_ptr->sync_done_on_outb_pcis)
SYNC_MEMORY;
__qdio_outbound_processing(q);
@@ -2045,11 +2036,13 @@ omit_handler_call:
}
static void
-qdio_call_shutdown(void *data)
+qdio_call_shutdown(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
put_device(&cdev->dev);
}
@@ -2091,7 +2084,7 @@ qdio_timeout_handler(struct ccw_device *cdev)
if (get_device(&cdev->dev)) {
/* Can't call shutdown from interrupt context. */
PREPARE_WORK(&cdev->private->kick_work,
- qdio_call_shutdown, (void *)cdev);
+ qdio_call_shutdown);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
break;
@@ -2924,7 +2917,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
irq_ptr = cdev->private->qdio_data;
- sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
+ sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2943,7 +2936,7 @@ qdio_initialize(struct qdio_initialize *init_data)
int rc;
char dbf_text[15];
- sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
+ sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2964,7 +2957,7 @@ qdio_allocate(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr;
char dbf_text[15];
- sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
+ sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -3187,7 +3180,7 @@ qdio_establish(struct qdio_initialize *init_data)
tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
}
- sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
+ sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -3425,7 +3418,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
(callflags&QDIO_FLAG_UNDER_INTERRUPT))
- atomic_swap(&q->polling,0);
+ atomic_xchg(&q->polling,0);
if (used_elements)
return;
@@ -3458,19 +3451,18 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
/* This is the outbound handling of queues */
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.start_time_outbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.start_time_outbound=NOW;
qdio_do_qdio_fill_output(q,qidx,count,buffers);
used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
if (callflags&QDIO_FLAG_DONT_SIGA) {
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
- perf_stats.outbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+ perf_stats.outbound_cnt++;
+ }
return;
}
if (q->is_iqdio_q) {
@@ -3500,9 +3492,8 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
qdio_kick_outbound_q(q);
} else {
QDIO_DBF_TEXT3(0,trace, "fast-req");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.fast_reqs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.fast_reqs++;
}
}
/*
@@ -3513,10 +3504,10 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
__qdio_outbound_processing(q);
}
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
- perf_stats.outbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+ perf_stats.outbound_cnt++;
+ }
}
/* count must be 1 in iqdio */
@@ -3529,7 +3520,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
- sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
+ sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
@@ -3574,7 +3565,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
return 0;
}
-#ifdef QDIO_PERFORMANCE_STATS
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
int buffer_length, int *eof, void *data)
@@ -3590,29 +3580,29 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
- _OUTP_IT("Number of tasklet runs (total) : %u\n",
+ _OUTP_IT("Number of tasklet runs (total) : %lu\n",
perf_stats.tl_runs);
_OUTP_IT("\n");
- _OUTP_IT("Number of SIGA sync's issued : %u\n",
+ _OUTP_IT("Number of SIGA sync's issued : %lu\n",
perf_stats.siga_syncs);
- _OUTP_IT("Number of SIGA in's issued : %u\n",
+ _OUTP_IT("Number of SIGA in's issued : %lu\n",
perf_stats.siga_ins);
- _OUTP_IT("Number of SIGA out's issued : %u\n",
+ _OUTP_IT("Number of SIGA out's issued : %lu\n",
perf_stats.siga_outs);
- _OUTP_IT("Number of PCIs caught : %u\n",
+ _OUTP_IT("Number of PCIs caught : %lu\n",
perf_stats.pcis);
- _OUTP_IT("Number of adapter interrupts caught : %u\n",
+ _OUTP_IT("Number of adapter interrupts caught : %lu\n",
perf_stats.thinints);
- _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n",
+ _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
perf_stats.fast_reqs);
_OUTP_IT("\n");
- _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
+ _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
perf_stats.inbound_time);
- _OUTP_IT("Number of inbound transfers : %u\n",
+ _OUTP_IT("Number of inbound transfers : %lu\n",
perf_stats.inbound_cnt);
- _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n",
+ _OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
perf_stats.outbound_time);
- _OUTP_IT("Number of do_QDIOs outbound : %u\n",
+ _OUTP_IT("Number of do_QDIOs outbound : %lu\n",
perf_stats.outbound_cnt);
_OUTP_IT("\n");
@@ -3620,12 +3610,10 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
}
static struct proc_dir_entry *qdio_perf_proc_file;
-#endif /* QDIO_PERFORMANCE_STATS */
static void
qdio_add_procfs_entry(void)
{
-#ifdef QDIO_PERFORMANCE_STATS
proc_perf_file_registration=0;
qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
S_IFREG|0444,&proc_root);
@@ -3637,20 +3625,58 @@ qdio_add_procfs_entry(void)
QDIO_PRINT_WARN("was not able to register perf. " \
"proc-file (%i).\n",
proc_perf_file_registration);
-#endif /* QDIO_PERFORMANCE_STATS */
}
static void
qdio_remove_procfs_entry(void)
{
-#ifdef QDIO_PERFORMANCE_STATS
perf_stats.tl_runs=0;
if (!proc_perf_file_registration) /* means if it went ok earlier */
remove_proc_entry(QDIO_PERF,&proc_root);
-#endif /* QDIO_PERFORMANCE_STATS */
}
+/**
+ * attributes in sysfs
+ *****************************************************************************/
+
+static ssize_t
+qdio_performance_stats_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
+}
+
+static ssize_t
+qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ char *tmp;
+ int i;
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i == 0) || (i == 1)) {
+ if (i == qdio_performance_stats)
+ return count;
+ qdio_performance_stats = i;
+ if (i==0) {
+ /* reset perf. stat. info */
+ i_p_nc = 0;
+ i_p_c = 0;
+ ii_p_nc = 0;
+ ii_p_c = 0;
+ o_p_nc = 0;
+ o_p_c = 0;
+ memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+ }
+ } else {
+ QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
+ qdio_performance_stats_store);
+
static void
tiqdio_register_thinints(void)
{
@@ -3695,6 +3721,7 @@ qdio_release_qdio_memory(void)
kfree(indicators);
}
+
static void
qdio_unregister_dbf_views(void)
{
@@ -3796,9 +3823,7 @@ static int __init
init_QDIO(void)
{
int res;
-#ifdef QDIO_PERFORMANCE_STATS
void *ptr;
-#endif /* QDIO_PERFORMANCE_STATS */
printk("qdio: loading %s\n",version);
@@ -3811,13 +3836,12 @@ init_QDIO(void)
return res;
QDIO_DBF_TEXT0(0,setup,"initQDIO");
+ res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-#ifdef QDIO_PERFORMANCE_STATS
- memset((void*)&perf_stats,0,sizeof(perf_stats));
+ memset((void*)&perf_stats,0,sizeof(perf_stats));
QDIO_DBF_TEXT0(0,setup,"perfstat");
ptr=&perf_stats;
QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
-#endif /* QDIO_PERFORMANCE_STATS */
qdio_add_procfs_entry();
@@ -3841,7 +3865,7 @@ cleanup_QDIO(void)
qdio_release_qdio_memory();
qdio_unregister_dbf_views();
mempool_destroy(qdio_mempool_scssc);
-
+ bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
printk("qdio: %s: module removed\n",version);
}
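qdio_call_shutdown() (and ap_scan_bus() further down) illustrate the workqueue API change that runs through this series: work functions now receive the work_struct itself and recover their context with container_of() instead of being handed an opaque data pointer. The pattern in isolation, with my_device/my_work_fn/handle_device as purely illustrative names:

/* Illustrative sketch of the reworked work_struct usage; names are hypothetical. */
struct my_device {
	struct work_struct kick_work;		/* work item embedded in the owning object */
	/* ... driver state ... */
};

static void my_work_fn(struct work_struct *work)
{
	/* Recover the owning object from the embedded member. */
	struct my_device *dev = container_of(work, struct my_device, kick_work);

	handle_device(dev);			/* hypothetical helper */
}

	/* Setup no longer passes a data pointer: */
	INIT_WORK(&dev->kick_work, my_work_fn);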
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 124569362f0..ec9af72b2af 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -12,10 +12,6 @@
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
-#ifdef CONFIG_QDIO_PERF_STATS
-#define QDIO_PERFORMANCE_STATS
-#endif /* CONFIG_QDIO_PERF_STATS */
-
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
@@ -236,7 +232,7 @@ enum qdio_irq_states {
#define QDIO_PRINT_EMERG(x...) do { } while (0)
#endif
-#define HEXDUMP16(importance,header,ptr) \
+#define QDIO_HEXDUMP16(importance,header,ptr) \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
@@ -274,12 +270,11 @@ do_sqbs(unsigned long sch, unsigned char state, int queue,
register unsigned long _sch asm ("1") = sch;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
- asm volatile (
- " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
- : "+d" (_ccq), "+d" (_queuestart)
- : "d" ((unsigned long)state), "d" (_sch)
- : "memory", "cc"
- );
+ asm volatile(
+ " .insn rsy,0xeb000000008A,%1,0,0(%2)"
+ : "+d" (_ccq), "+d" (_queuestart)
+ : "d" ((unsigned long)state), "d" (_sch)
+ : "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
@@ -299,12 +294,11 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = 0;
- asm volatile (
- " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
- : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
- : "d" (_sch)
- : "memory", "cc"
- );
+ asm volatile(
+ " .insn rrf,0xB99c0000,%1,%2,0,0"
+ : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+ : "d" (_sch)
+ : "memory", "cc" );
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;
@@ -319,69 +313,35 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
static inline int
do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{
+ register unsigned long reg0 asm ("0") = 2;
+ register struct subchannel_id reg1 asm ("1") = schid;
+ register unsigned long reg2 asm ("2") = mask1;
+ register unsigned long reg3 asm ("3") = mask2;
int cc;
-#ifndef CONFIG_64BIT
- asm volatile (
- "lhi 0,2 \n\t"
- "lr 1,%1 \n\t"
- "lr 2,%2 \n\t"
- "lr 3,%3 \n\t"
- "siga 0 \n\t"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
- : "=d" (cc)
- : "d" (schid), "d" (mask1), "d" (mask2)
- : "cc", "0", "1", "2", "3"
- );
-#else /* CONFIG_64BIT */
- asm volatile (
- "lghi 0,2 \n\t"
- "llgfr 1,%1 \n\t"
- "llgfr 2,%2 \n\t"
- "llgfr 3,%3 \n\t"
- "siga 0 \n\t"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
: "=d" (cc)
- : "d" (schid), "d" (mask1), "d" (mask2)
- : "cc", "0", "1", "2", "3"
- );
-#endif /* CONFIG_64BIT */
+ : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
return cc;
}
static inline int
do_siga_input(struct subchannel_id schid, unsigned int mask)
{
+ register unsigned long reg0 asm ("0") = 1;
+ register struct subchannel_id reg1 asm ("1") = schid;
+ register unsigned long reg2 asm ("2") = mask;
int cc;
-#ifndef CONFIG_64BIT
- asm volatile (
- "lhi 0,1 \n\t"
- "lr 1,%1 \n\t"
- "lr 2,%2 \n\t"
- "siga 0 \n\t"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
: "=d" (cc)
- : "d" (schid), "d" (mask)
- : "cc", "0", "1", "2", "memory"
- );
-#else /* CONFIG_64BIT */
- asm volatile (
- "lghi 0,1 \n\t"
- "llgfr 1,%1 \n\t"
- "llgfr 2,%2 \n\t"
- "siga 0 \n\t"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
- : "=d" (cc)
- : "d" (schid), "d" (mask)
- : "cc", "0", "1", "2", "memory"
- );
-#endif /* CONFIG_64BIT */
-
+ : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
return cc;
}
@@ -389,93 +349,35 @@ static inline int
do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
unsigned int fc)
{
+ register unsigned long __fc asm("0") = fc;
+ register unsigned long __schid asm("1") = schid;
+ register unsigned long __mask asm("2") = mask;
int cc;
- __u32 busy_bit;
-
-#ifndef CONFIG_64BIT
- asm volatile (
- "lhi 0,0 \n\t"
- "lr 1,%2 \n\t"
- "lr 2,%3 \n\t"
- "siga 0 \n\t"
- "0:"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
- "srl 0,31 \n\t"
- "lr %1,0 \n\t"
- "1: \n\t"
- ".section .fixup,\"ax\"\n\t"
- "2: \n\t"
- "lhi %0,%4 \n\t"
- "bras 1,3f \n\t"
- ".long 1b \n\t"
- "3: \n\t"
- "l 1,0(1) \n\t"
- "br 1 \n\t"
- ".previous \n\t"
- ".section __ex_table,\"a\"\n\t"
- ".align 4 \n\t"
- ".long 0b,2b \n\t"
- ".previous \n\t"
- : "=d" (cc), "=d" (busy_bit)
- : "d" (schid), "d" (mask),
- "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
- : "cc", "0", "1", "2", "memory"
- );
-#else /* CONFIG_64BIT */
- asm volatile (
- "llgfr 0,%5 \n\t"
- "lgr 1,%2 \n\t"
- "llgfr 2,%3 \n\t"
- "siga 0 \n\t"
- "0:"
- "ipm %0 \n\t"
- "srl %0,28 \n\t"
- "srl 0,31 \n\t"
- "llgfr %1,0 \n\t"
- "1: \n\t"
- ".section .fixup,\"ax\"\n\t"
- "lghi %0,%4 \n\t"
- "jg 1b \n\t"
- ".previous\n\t"
- ".section __ex_table,\"a\"\n\t"
- ".align 8 \n\t"
- ".quad 0b,1b \n\t"
- ".previous \n\t"
- : "=d" (cc), "=d" (busy_bit)
- : "d" (schid), "d" (mask),
- "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
- : "cc", "0", "1", "2", "memory"
- );
-#endif /* CONFIG_64BIT */
-
- (*bb) = busy_bit;
+
+ asm volatile(
+ " siga 0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+ : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
+ : "cc", "memory");
+ (*bb) = ((unsigned int) __fc) >> 31;
return cc;
}
static inline unsigned long
do_clear_global_summary(void)
{
-
- unsigned long time;
-
-#ifndef CONFIG_64BIT
- asm volatile (
- "lhi 1,3 \n\t"
- ".insn rre,0xb2650000,2,0 \n\t"
- "lr %0,3 \n\t"
- : "=d" (time) : : "cc", "1", "2", "3"
- );
-#else /* CONFIG_64BIT */
- asm volatile (
- "lghi 1,3 \n\t"
- ".insn rre,0xb2650000,2,0 \n\t"
- "lgr %0,3 \n\t"
- : "=d" (time) : : "cc", "1", "2", "3"
- );
-#endif /* CONFIG_64BIT */
-
- return time;
+ register unsigned long __fn asm("1") = 3;
+ register unsigned long __tmp asm("2");
+ register unsigned long __time asm("3");
+
+ asm volatile(
+ " .insn rre,0xb2650000,2,0"
+ : "+d" (__fn), "=d" (__tmp), "=d" (__time));
+ return __time;
}
/*
@@ -503,27 +405,23 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
-#ifdef QDIO_PERFORMANCE_STATS
struct qdio_perf_stats {
- unsigned int tl_runs;
+ unsigned long tl_runs;
- unsigned int siga_outs;
- unsigned int siga_ins;
- unsigned int siga_syncs;
- unsigned int pcis;
- unsigned int thinints;
- unsigned int fast_reqs;
+ unsigned long siga_outs;
+ unsigned long siga_ins;
+ unsigned long siga_syncs;
+ unsigned long pcis;
+ unsigned long thinints;
+ unsigned long fast_reqs;
__u64 start_time_outbound;
- unsigned int outbound_cnt;
- unsigned int outbound_time;
+ unsigned long outbound_cnt;
+ unsigned long outbound_time;
__u64 start_time_inbound;
- unsigned int inbound_cnt;
- unsigned int inbound_time;
+ unsigned long inbound_cnt;
+ unsigned long inbound_time;
};
-#endif /* QDIO_PERFORMANCE_STATS */
-
-#define atomic_swap(a,b) xchg((int*)a.counter,b)
/* unlikely as the later the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
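do_siga_output() likewise collapses into a single asm body for 31- and 64-bit builds. The function code travels in register 0, a faulting SIGA is routed by EX_TABLE past the ipm so that cc keeps the QDIO_SIGA_ERROR_ACCESS_EXCEPTION value preloaded through the "0" matching constraint, and because the instruction leaves the busy indication in the uppermost bit of the 32-bit value in register 0, the busy bit is recovered on the C side from the clobbered __fc register variable. The core of the new function, with comments added:

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"			/* condition code -> cc */
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)				/* access exception: skip to 1: */
		: "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)	/* cc reported if SIGA faults */
		: "cc", "memory");
	(*bb) = ((unsigned int) __fc) >> 31;		/* busy bit = MSB of the 32-bit fc value */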
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 6ed0985c0c9..ad60afe5dd1 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -33,11 +33,12 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
+#include <asm/reset.h>
#include "ap_bus.h"
/* Some prototypes. */
-static void ap_scan_bus(void *);
+static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static void ap_poll_timeout(unsigned long);
static int ap_poll_thread_start(void);
@@ -71,7 +72,7 @@ static struct device *ap_root_device = NULL;
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
-static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
+static DECLARE_WORK(ap_config_work, ap_scan_bus);
/**
* Tasklet & timer for AP request polling.
@@ -431,7 +432,15 @@ static int ap_uevent (struct device *dev, char **envp, int num_envp,
ap_dev->device_type);
if (buffer_size - length <= 0)
return -ENOMEM;
- envp[1] = 0;
+ buffer += length;
+ buffer_size -= length;
+ /* Add MODALIAS= */
+ envp[1] = buffer;
+ length = scnprintf(buffer, buffer_size, "MODALIAS=ap:t%02X",
+ ap_dev->device_type);
+ if (buffer_size - length <= 0)
+ return -ENOMEM;
+ envp[2] = NULL;
return 0;
}
@@ -449,8 +458,6 @@ static int ap_device_probe(struct device *dev)
ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
- if (rc)
- ap_dev->unregistered = 1;
return rc;
}
@@ -487,14 +494,7 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
- spin_lock_bh(&ap_dev->lock);
- __ap_flush_queue(ap_dev);
- /**
- * set ->unregistered to 1 while holding the lock. This prevents
- * new messages to be put on the queue from now on.
- */
- ap_dev->unregistered = 1;
- spin_unlock_bh(&ap_dev->lock);
+ ap_flush_queue(ap_dev);
if (ap_drv->remove)
ap_drv->remove(ap_dev);
return 0;
@@ -733,7 +733,7 @@ static void ap_device_release(struct device *dev)
kfree(ap_dev);
}
-static void ap_scan_bus(void *data)
+static void ap_scan_bus(struct work_struct *unused)
{
struct ap_device *ap_dev;
struct device *dev;
@@ -748,11 +748,16 @@ static void ap_scan_bus(void *data)
dev = bus_find_device(&ap_bus_type, NULL,
(void *)(unsigned long)qid,
__ap_scan_bus);
+ rc = ap_query_queue(qid, &queue_depth, &device_type);
+ if (dev && rc) {
+ put_device(dev);
+ device_unregister(dev);
+ continue;
+ }
if (dev) {
put_device(dev);
continue;
}
- rc = ap_query_queue(qid, &queue_depth, &device_type);
if (rc)
continue;
rc = ap_init_queue(qid);
@@ -763,6 +768,7 @@ static void ap_scan_bus(void *data)
break;
ap_dev->qid = qid;
ap_dev->queue_depth = queue_depth;
+ ap_dev->unregistered = 1;
spin_lock_init(&ap_dev->lock);
INIT_LIST_HEAD(&ap_dev->pendingq);
INIT_LIST_HEAD(&ap_dev->requestq);
@@ -784,7 +790,12 @@ static void ap_scan_bus(void *data)
/* Add device attributes. */
rc = sysfs_create_group(&ap_dev->device.kobj,
&ap_dev_attr_group);
- if (rc)
+ if (!rc) {
+ spin_lock_bh(&ap_dev->lock);
+ ap_dev->unregistered = 0;
+ spin_unlock_bh(&ap_dev->lock);
+ }
+ else
device_unregister(&ap_dev->device);
}
}
@@ -970,6 +981,8 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
rc = __ap_queue_message(ap_dev, ap_msg);
if (!rc)
wake_up(&ap_poll_wait);
+ if (rc == -ENODEV)
+ ap_dev->unregistered = 1;
} else {
ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
rc = 0;
@@ -1028,6 +1041,8 @@ static int __ap_poll_all(struct device *dev, void *data)
spin_lock(&ap_dev->lock);
if (!ap_dev->unregistered) {
rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
+ if (rc)
+ ap_dev->unregistered = 1;
} else
rc = 0;
spin_unlock(&ap_dev->lock);
@@ -1061,7 +1076,7 @@ static int ap_poll_thread(void *data)
unsigned long flags;
int requests;
- set_user_nice(current, -20);
+ set_user_nice(current, 19);
while (1) {
if (need_resched()) {
schedule();
@@ -1114,6 +1129,19 @@ static void ap_poll_thread_stop(void)
mutex_unlock(&ap_poll_thread_mutex);
}
+static void ap_reset(void)
+{
+ int i, j;
+
+ for (i = 0; i < AP_DOMAINS; i++)
+ for (j = 0; j < AP_DEVICES; j++)
+ ap_reset_queue(AP_MKQID(j, i));
+}
+
+static struct reset_call ap_reset_call = {
+ .fn = ap_reset,
+};
+
/**
* The module initialization code.
*/
@@ -1130,6 +1158,7 @@ int __init ap_module_init(void)
printk(KERN_WARNING "AP instructions not installed.\n");
return -ENODEV;
}
+ register_reset_call(&ap_reset_call);
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
@@ -1183,6 +1212,7 @@ out_bus:
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
out:
+ unregister_reset_call(&ap_reset_call);
return rc;
}
@@ -1213,6 +1243,7 @@ void ap_module_exit(void)
for (i = 0; ap_bus_attrs[i]; i++)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
+ unregister_reset_call(&ap_reset_call);
}
#ifndef CONFIG_ZCRYPT_MONOLITHIC
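ap_bus.c additionally hooks the AP bus into the s390 reset machinery: a reset_call from <asm/reset.h> is registered during module init and removed again on the init error path and in ap_module_exit(), presumably so that outstanding adapter requests cannot interfere with a re-IPL or dump kernel. The moving parts, pulled together from the hunks above:

static void ap_reset(void)
{
	int i, j;

	/* Walk the whole domain x device matrix and reset each queue. */
	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset,
};

	/* in ap_module_init(): */
	register_reset_call(&ap_reset_call);
	/* in ap_module_exit() and on the init error path: */
	unregister_reset_call(&ap_reset_call);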
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 969be465309..1ee9a6f0654 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -29,7 +29,7 @@
#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
/*-----------------------------------------------------*
-* CLAW control comand code *
+* CLAW control command code *
*------------------------------------------------------*/
#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 821dde86e24..1476ce2b437 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(iucv_irq_queue_lock);
*Internal function prototypes
*/
static void iucv_tasklet_handler(unsigned long);
-static void iucv_irq_handler(struct pt_regs *, __u16);
+static void iucv_irq_handler(__u16);
static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
@@ -534,19 +534,15 @@ iucv_add_handler (handler *new)
*
* Returns: return code from CP's IUCV call
*/
-static __inline__ ulong
-b2f0(__u32 code, void *parm)
+static inline ulong b2f0(__u32 code, void *parm)
{
+ register unsigned long reg0 asm ("0");
+ register unsigned long reg1 asm ("1");
iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
- asm volatile (
- "LRA 1,0(%1)\n\t"
- "LR 0,%0\n\t"
- ".long 0xb2f01000"
- :
- : "d" (code), "a" (parm)
- : "0", "1"
- );
+ reg0 = code;
+ reg1 = virt_to_phys(parm);
+ asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
@@ -1248,6 +1244,8 @@ iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
static int
iucv_query_generic(int want_maxconn)
{
+ register unsigned long reg0 asm ("0");
+ register unsigned long reg1 asm ("1");
iparml_purge *parm = (iparml_purge *)grab_param();
int bufsize, maxconn;
int ccode;
@@ -1256,18 +1254,15 @@ iucv_query_generic(int want_maxconn)
* Call b2f0 and store R0 (max buffer size),
* R1 (max connections) and CC.
*/
- asm volatile (
- "LRA 1,0(%4)\n\t"
- "LR 0,%3\n\t"
- ".long 0xb2f01000\n\t"
- "IPM %0\n\t"
- "SRL %0,28\n\t"
- "ST 0,%1\n\t"
- "ST 1,%2\n\t"
- : "=d" (ccode), "=m" (bufsize), "=m" (maxconn)
- : "d" (QUERY), "a" (parm)
- : "0", "1", "cc"
- );
+ reg0 = QUERY;
+ reg1 = virt_to_phys(parm);
+ asm volatile(
+ " .long 0xb2f01000\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
+ bufsize = reg0;
+ maxconn = reg1;
release_param(parm);
if (ccode)
@@ -2256,7 +2251,7 @@ iucv_sever(__u16 pathid, __u8 user_data[16])
* Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
*/
static void
-iucv_irq_handler(struct pt_regs *regs, __u16 code)
+iucv_irq_handler(__u16 code)
{
iucv_irqdata *irqdata;
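The IUCV b2f0() wrapper gets the same cleanup as the CIO wrappers: the function code and the real address of the parameter list are bound to registers 0 and 1 through register variables, and the old LRA instruction is replaced by virt_to_phys() on the C side before the raw 0xb2f01000 opcode is issued. The core of the rewritten helper (abridged; the remainder of the function is not touched by this hunk):

static inline ulong b2f0(__u32 code, void *parm)
{
	register unsigned long reg0 asm ("0");	/* IUCV function code */
	register unsigned long reg1 asm ("1");	/* real address of the parameter list */

	iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));

	reg0 = code;
	reg1 = virt_to_phys(parm);
	asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));

	iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
	/* ... return value handling unchanged ... */
}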
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 16ac68c27a2..e5665b6743a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -54,6 +54,8 @@
#error Cannot compile lcs.c without some net devices switched on.
#endif
+#define PRINTK_HEADER " lcs: "
+
/**
* initialization string for output
*/
@@ -65,7 +67,7 @@ static char debug_buffer[255];
* Some prototypes.
*/
static void lcs_tasklet(unsigned long);
-static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_start_kernel_thread(struct work_struct *);
static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
static int lcs_recovery(void *ptr);
@@ -120,7 +122,7 @@ lcs_alloc_channel(struct lcs_channel *channel)
kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
- channel->iob[cnt].state = BUF_STATE_EMPTY;
+ channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
}
if (cnt < LCS_NUM_BUFFS) {
/* Not all io buffers could be allocated. */
@@ -236,7 +238,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
- card->read.iob[cnt].state = BUF_STATE_READY;
+ card->read.iob[cnt].state = LCS_BUF_STATE_READY;
card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
}
card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
@@ -247,7 +249,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
card->read.ccws[LCS_NUM_BUFFS].cda =
(__u32) __pa(card->read.ccws);
/* Set initial state of the read channel. */
- card->read.state = CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
card->read.io_idx = 0;
card->read.buf_idx = 0;
@@ -294,7 +296,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
card->write.ccws[LCS_NUM_BUFFS].cda =
(__u32) __pa(card->write.ccws);
/* Set initial state of the write channel. */
- card->read.state = CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
card->write.io_idx = 0;
card->write.buf_idx = 0;
@@ -496,7 +498,7 @@ lcs_start_channel(struct lcs_channel *channel)
channel->ccws + channel->io_idx, 0, 0,
DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
if (rc == 0)
- channel->state = CH_STATE_RUNNING;
+ channel->state = LCS_CH_STATE_RUNNING;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
@@ -520,8 +522,8 @@ lcs_clear_channel(struct lcs_channel *channel)
LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
return rc;
}
- wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
- channel->state = CH_STATE_STOPPED;
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+ channel->state = LCS_CH_STATE_STOPPED;
return rc;
}
@@ -535,11 +537,11 @@ lcs_stop_channel(struct lcs_channel *channel)
unsigned long flags;
int rc;
- if (channel->state == CH_STATE_STOPPED)
+ if (channel->state == LCS_CH_STATE_STOPPED)
return 0;
LCS_DBF_TEXT(4,trace,"haltsch");
LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
- channel->state = CH_STATE_INIT;
+ channel->state = LCS_CH_STATE_INIT;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -548,7 +550,7 @@ lcs_stop_channel(struct lcs_channel *channel)
return rc;
}
/* Asynchronous halt initiated. Wait for its completion. */
- wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
lcs_clear_channel(channel);
return 0;
}
@@ -596,8 +598,8 @@ __lcs_get_buffer(struct lcs_channel *channel)
LCS_DBF_TEXT(5, trace, "_getbuff");
index = channel->io_idx;
do {
- if (channel->iob[index].state == BUF_STATE_EMPTY) {
- channel->iob[index].state = BUF_STATE_LOCKED;
+ if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+ channel->iob[index].state = LCS_BUF_STATE_LOCKED;
return channel->iob + index;
}
index = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -626,7 +628,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
{
int rc;
- if (channel->state != CH_STATE_SUSPENDED)
+ if (channel->state != LCS_CH_STATE_SUSPENDED)
return 0;
if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
return 0;
@@ -636,7 +638,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
} else
- channel->state = CH_STATE_RUNNING;
+ channel->state = LCS_CH_STATE_RUNNING;
return rc;
}
@@ -670,10 +672,10 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff");
- BUG_ON(buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED);
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = BUF_STATE_READY;
+ buffer->state = LCS_BUF_STATE_READY;
index = buffer - channel->iob;
/* Set length. */
channel->ccws[index].count = buffer->count;
@@ -695,8 +697,8 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff");
- BUG_ON(buffer->state != BUF_STATE_READY);
- buffer->state = BUF_STATE_PROCESSED;
+ BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+ buffer->state = LCS_BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
next = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -704,7 +706,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
channel->ccws[index].flags &= ~CCW_FLAG_PCI;
/* Check the suspend bit of the previous buffer. */
- if (channel->iob[prev].state == BUF_STATE_READY) {
+ if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
/*
* Previous buffer is in state ready. It might have
* happened in lcs_ready_buffer that the suspend bit
@@ -727,10 +729,10 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff");
- BUG_ON(buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED);
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = BUF_STATE_EMPTY;
+ buffer->state = LCS_BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}
@@ -1147,7 +1149,7 @@ list_modified:
* get mac address for the relevant Multicast address
*/
static void
-lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
+lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
{
LCS_DBF_TEXT(4,trace, "getmac");
if (dev->type == ARPHRD_IEEE802_TR)
@@ -1264,7 +1266,7 @@ lcs_register_mc_addresses(void *data)
netif_carrier_off(card->dev);
netif_tx_disable(card->dev);
wait_event(card->write.wait_q,
- (card->write.state != CH_STATE_RUNNING));
+ (card->write.state != LCS_CH_STATE_RUNNING));
lcs_fix_multicast_list(card);
if (card->state == DEV_STATE_UP) {
netif_carrier_on(card->dev);
@@ -1404,7 +1406,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
}
/* How far in the ccw chain have we processed? */
- if ((channel->state != CH_STATE_INIT) &&
+ if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
- channel->ccws;
@@ -1424,20 +1426,20 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
(irb->scsw.dstat & DEV_STAT_CHN_END) ||
(irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
/* Mark channel as stopped. */
- channel->state = CH_STATE_STOPPED;
+ channel->state = LCS_CH_STATE_STOPPED;
else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
/* CCW execution stopped on a suspend bit. */
- channel->state = CH_STATE_SUSPENDED;
+ channel->state = LCS_CH_STATE_SUSPENDED;
if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
if (irb->scsw.cc != 0) {
ccw_device_halt(channel->ccwdev, (addr_t) channel);
return;
}
/* The channel has been stopped by halt_IO. */
- channel->state = CH_STATE_HALTED;
+ channel->state = LCS_CH_STATE_HALTED;
}
if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
- channel->state = CH_STATE_CLEARED;
+ channel->state = LCS_CH_STATE_CLEARED;
}
/* Do the rest in the tasklet. */
tasklet_schedule(&channel->irq_tasklet);
@@ -1461,7 +1463,7 @@ lcs_tasklet(unsigned long data)
/* Check for processed buffers. */
iob = channel->iob;
buf_idx = channel->buf_idx;
- while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
+ while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
/* Do the callback thing. */
if (iob[buf_idx].callback != NULL)
iob[buf_idx].callback(channel, iob + buf_idx);
@@ -1469,12 +1471,12 @@ lcs_tasklet(unsigned long data)
}
channel->buf_idx = buf_idx;
- if (channel->state == CH_STATE_STOPPED)
+ if (channel->state == LCS_CH_STATE_STOPPED)
// FIXME: what if rc != 0 ??
rc = lcs_start_channel(channel);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- if (channel->state == CH_STATE_SUSPENDED &&
- channel->iob[channel->io_idx].state == BUF_STATE_READY) {
+ if (channel->state == LCS_CH_STATE_SUSPENDED &&
+ channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
// FIXME: what if rc != 0 ??
rc = __lcs_resume_channel(channel);
}
@@ -1689,8 +1691,8 @@ lcs_detect(struct lcs_card *card)
card->state = DEV_STATE_UP;
} else {
card->state = DEV_STATE_DOWN;
- card->write.state = CH_STATE_INIT;
- card->read.state = CH_STATE_INIT;
+ card->write.state = LCS_CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
}
return rc;
}
@@ -1705,8 +1707,8 @@ lcs_stopcard(struct lcs_card *card)
LCS_DBF_TEXT(3, setup, "stopcard");
- if (card->read.state != CH_STATE_STOPPED &&
- card->write.state != CH_STATE_STOPPED &&
+ if (card->read.state != LCS_CH_STATE_STOPPED &&
+ card->write.state != LCS_CH_STATE_STOPPED &&
card->state == DEV_STATE_UP) {
lcs_clear_multicast_list(card);
rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -1722,8 +1724,9 @@ lcs_stopcard(struct lcs_card *card)
* Kernel Thread helper functions for LGW initiated commands
*/
static void
-lcs_start_kernel_thread(struct lcs_card *card)
+lcs_start_kernel_thread(struct work_struct *work)
{
+ struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
LCS_DBF_TEXT(5, trace, "krnthrd");
if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
kernel_thread(lcs_recovery, (void *) card, SIGCHLD);
@@ -1871,7 +1874,7 @@ lcs_stop_device(struct net_device *dev)
netif_tx_disable(dev);
dev->flags &= ~IFF_UP;
wait_event(card->write.wait_q,
- (card->write.state != CH_STATE_RUNNING));
+ (card->write.state != LCS_CH_STATE_RUNNING));
rc = lcs_stopcard(card);
if (rc)
PRINT_ERR("Try it again!\n ");
@@ -2051,8 +2054,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
ccwgdev->cdev[0]->handler = lcs_irq;
ccwgdev->cdev[1]->handler = lcs_irq;
card->gdev = ccwgdev;
- INIT_WORK(&card->kernel_thread_starter,
- (void *) lcs_start_kernel_thread, card);
+ INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
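
The hunk above follows the 2.6.20 workqueue conversion: INIT_WORK() now takes only the work item and a handler, the handler receives a struct work_struct pointer, and the enclosing object is recovered with container_of(). A minimal sketch of the pattern, with made-up structure and function names (not taken from the driver):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ex_card {
	struct work_struct thread_starter;
	int id;
};

static void ex_start_kernel_thread(struct work_struct *work)
{
	/* No private data pointer any more: recover the card from the
	 * embedded work_struct. */
	struct ex_card *card = container_of(work, struct ex_card,
					    thread_starter);

	printk(KERN_INFO "ex: starting helper thread for card %d\n",
	       card->id);
}

static void ex_card_setup(struct ex_card *card)
{
	/* Two-argument INIT_WORK(): work item and handler only. */
	INIT_WORK(&card->thread_starter, ex_start_kernel_thread);
}
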
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 93143932983..0e1e4a0a88f 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -23,11 +23,6 @@ do { \
} while (0)
/**
- * some more definitions for debug or output stuff
- */
-#define PRINTK_HEADER " lcs: "
-
-/**
* sysfs related stuff
*/
#define CARD_FROM_DEV(cdev) \
@@ -127,22 +122,22 @@ do { \
* LCS Buffer states
*/
enum lcs_buffer_states {
- BUF_STATE_EMPTY, /* buffer is empty */
- BUF_STATE_LOCKED, /* buffer is locked, don't touch */
- BUF_STATE_READY, /* buffer is ready for read/write */
- BUF_STATE_PROCESSED,
+ LCS_BUF_STATE_EMPTY, /* buffer is empty */
+ LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
+ LCS_BUF_STATE_READY, /* buffer is ready for read/write */
+ LCS_BUF_STATE_PROCESSED,
};
/**
* LCS Channel State Machine declarations
*/
enum lcs_channel_states {
- CH_STATE_INIT,
- CH_STATE_HALTED,
- CH_STATE_STOPPED,
- CH_STATE_RUNNING,
- CH_STATE_SUSPENDED,
- CH_STATE_CLEARED,
+ LCS_CH_STATE_INIT,
+ LCS_CH_STATE_HALTED,
+ LCS_CH_STATE_STOPPED,
+ LCS_CH_STATE_RUNNING,
+ LCS_CH_STATE_SUSPENDED,
+ LCS_CH_STATE_CLEARED,
};
/**
@@ -169,7 +164,7 @@ struct lcs_header {
} __attribute__ ((packed));
struct lcs_ip_mac_pair {
- __u32 ip_addr;
+ __be32 ip_addr;
__u8 mac_addr[LCS_MAC_LENGTH];
__u8 reserved[2];
} __attribute__ ((packed));
@@ -287,7 +282,7 @@ struct lcs_card {
enum lcs_dev_states state;
struct net_device *dev;
struct net_device_stats stats;
- unsigned short (*lan_type_trans)(struct sk_buff *skb,
+ __be16 (*lan_type_trans)(struct sk_buff *skb,
struct net_device *dev);
struct ccwgroup_device *gdev;
struct lcs_channel read;
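
The lcs.h hunk also switches on-wire fields and the lan_type_trans return value to the sparse endianness types __be32/__be16. A hedged sketch of how such annotated fields are consumed; the structure and helper names here are invented, not part of lcs.h:

#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>

struct ex_ip_mac_pair {
	__be32 ip_addr;			/* kept in network byte order */
	__u8 mac_addr[6];
};

static int ex_pair_matches(const struct ex_ip_mac_pair *p, __be32 addr,
			   __be16 proto)
{
	/* Compare big-endian values against htons() constants or other
	 * network-order data so sparse's endianness checking stays quiet. */
	return proto == htons(ETH_P_IP) && p->ip_addr == addr;
}
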
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 821383d8cbe..53c358c7d36 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -151,8 +151,6 @@ qeth_hex_dump(unsigned char *buf, size_t len)
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
-#define atomic_swap(a,b) xchg((int *)a.counter, b)
-
/*
* Common IO related definitions
*/
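
With the driver-private atomic_swap() macro gone, the qeth call sites further down switch to the generic atomic_xchg() helper, which swaps in a new value and returns the previous one. A minimal sketch; the lock-state constants are illustrative only:

#include <asm/atomic.h>

#define EX_Q_UNLOCKED		0
#define EX_Q_LOCKED_FLUSH	1

static int ex_claim_flush(atomic_t *state)
{
	/* atomic_xchg() stores the new state and hands back the old one,
	 * so the caller can tell whether it took the queue over. */
	return atomic_xchg(state, EX_Q_LOCKED_FLUSH) == EX_Q_UNLOCKED;
}
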
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index a363721cf28..6bb558a9a03 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -258,7 +258,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
- u32 *hcsum)
+ __wsum *hcsum)
{
struct skb_frag_struct *frag;
int left_in_frag;
@@ -305,7 +305,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp, int data_len,
- u32 hcsum)
+ __wsum hcsum)
{
u8 *page;
int page_remainder;
@@ -349,10 +349,10 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
-static inline u32
+static inline __wsum
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
- u32 phcsum; /* pseudo header checksum */
+ __wsum phcsum; /* pseudo header checksum */
QETH_DBF_TEXT(trace, 5, "eddpckt4");
eddp->th.tcp.h.check = 0;
@@ -363,11 +363,11 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
-static inline u32
+static inline __wsum
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
- u32 proto;
- u32 phcsum; /* pseudo header checksum */
+ __be32 proto;
+ __wsum phcsum; /* pseudo header checksum */
QETH_DBF_TEXT(trace, 5, "eddpckt6");
eddp->th.tcp.h.check = 0;
@@ -405,7 +405,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
{
struct tcphdr *tcph;
int data_len;
- u32 hcsum;
+ __wsum hcsum;
QETH_DBF_TEXT(trace, 5, "eddpftcp");
eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
@@ -433,22 +433,22 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
eddp->qh.hdr.l3.length = data_len + eddp->nhl +
eddp->thl;
/* prepare ip hdr */
- if (eddp->skb->protocol == ETH_P_IP){
- eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
- eddp->thl;
+ if (eddp->skb->protocol == htons(ETH_P_IP)){
+ eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
+ eddp->thl);
eddp->nh.ip4.h.check = 0;
eddp->nh.ip4.h.check =
ip_fast_csum((u8 *)&eddp->nh.ip4.h,
eddp->nh.ip4.h.ihl);
} else
- eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
+ eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl);
/* prepare tcp hdr */
if (data_len == (eddp->skb->len - eddp->skb_offset)){
/* last segment -> set FIN and PSH flags */
eddp->th.tcp.h.fin = tcph->fin;
eddp->th.tcp.h.psh = tcph->psh;
}
- if (eddp->skb->protocol == ETH_P_IP)
+ if (eddp->skb->protocol == htons(ETH_P_IP))
hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
else
hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
@@ -458,9 +458,9 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
if (eddp->skb_offset >= eddp->skb->len)
break;
/* prepare headers for next round */
- if (eddp->skb->protocol == ETH_P_IP)
- eddp->nh.ip4.h.id++;
- eddp->th.tcp.h.seq += data_len;
+ if (eddp->skb->protocol == htons(ETH_P_IP))
+ eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
+ eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len);
}
}
@@ -472,7 +472,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
QETH_DBF_TEXT(trace, 5, "eddpficx");
/* create our segmentation headers and copy original headers */
- if (skb->protocol == ETH_P_IP)
+ if (skb->protocol == htons(ETH_P_IP))
eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
skb->nh.iph->ihl*4,
(u8 *)skb->h.th, skb->h.th->doff*4);
@@ -490,7 +490,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
- eddp->vlan[0] = __constant_htons(skb->protocol);
+ eddp->vlan[0] = skb->protocol;
eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
}
#endif /* CONFIG_QETH_VLAN */
@@ -588,11 +588,11 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
struct qeth_eddp_context *ctx = NULL;
QETH_DBF_TEXT(trace, 5, "creddpct");
- if (skb->protocol == ETH_P_IP)
+ if (skb->protocol == htons(ETH_P_IP))
ctx = qeth_eddp_create_context_generic(card, skb,
sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
skb->h.th->doff*4);
- else if (skb->protocol == ETH_P_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6))
ctx = qeth_eddp_create_context_generic(card, skb,
sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
skb->h.th->doff*4);
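
The qeth_eddp changes above all stem from skb->protocol and the IP header fields being big-endian (__be16/__be32): comparisons use htons() constants, and a stored value is incremented by converting to host order and back. A small sketch of that idiom with a hypothetical helper:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_ether.h>

static void ex_bump_ip_id(struct sk_buff *skb, struct iphdr *iph)
{
	/* skb->protocol is __be16, so compare against htons(ETH_P_IP). */
	if (skb->protocol != htons(ETH_P_IP))
		return;

	/* iph->id stays in network byte order: convert, add one, and
	 * convert back instead of incrementing the raw field. */
	iph->id = htons(ntohs(iph->id) + 1);
}
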
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
index cae9ba26505..103768d3bab 100644
--- a/drivers/s390/net/qeth_eddp.h
+++ b/drivers/s390/net/qeth_eddp.h
@@ -54,7 +54,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
struct qeth_eddp_data {
struct qeth_hdr qh;
struct ethhdr mac;
- u16 vlan[2];
+ __be16 vlan[2];
union {
struct {
struct iphdr h;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 5613b4564fa..2bde4f1fb9c 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1039,8 +1039,9 @@ qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
}
static void
-qeth_start_kernel_thread(struct qeth_card *card)
+qeth_start_kernel_thread(struct work_struct *work)
{
+ struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
QETH_DBF_TEXT(trace , 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
@@ -1103,8 +1104,7 @@ qeth_setup_card(struct qeth_card *card)
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
- INIT_WORK(&card->kernel_thread_starter,
- (void *)qeth_start_kernel_thread,card);
+ INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->ip_list);
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
if (!card->ip_tbd_list) {
@@ -2982,7 +2982,7 @@ qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)){
- if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+ if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
QETH_OUT_Q_UNLOCKED) {
/*
* If we get in here, there was no action in
@@ -3245,7 +3245,7 @@ qeth_free_qdio_buffers(struct qeth_card *card)
int i, j;
QETH_DBF_TEXT(trace, 2, "freeqdbf");
- if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
@@ -4366,7 +4366,7 @@ out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
- atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
+ atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/*
* queue->state will go from LOCKED -> UNLOCKED or from
* LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
@@ -8067,7 +8067,7 @@ qeth_arp_constructor(struct neighbour *neigh)
neigh->parms = neigh_parms_clone(parms);
rcu_read_unlock();
- neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
+ neigh->type = inet_addr_type(*(__be32 *) neigh->primary_key);
neigh->nud_state = NUD_NOARP;
neigh->ops = arp_direct_ops;
neigh->output = neigh->ops->queue_xmit;
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index a914129a4da..e088b5e2871 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -208,7 +208,7 @@ s390_handle_mcck(void)
*/
__ctl_clear_bit(14, 24); /* Disable WARNING MCH */
if (xchg(&mchchk_wng_posted, 1) == 0)
- kill_proc(1, SIGPWR, 1);
+ kill_cad_pid(SIGPWR, 1);
}
#endif
@@ -253,11 +253,12 @@ s390_revalidate_registers(struct mci *mci)
kill_task = 1;
#ifndef CONFIG_64BIT
- asm volatile("ld 0,0(%0)\n"
- "ld 2,8(%0)\n"
- "ld 4,16(%0)\n"
- "ld 6,24(%0)"
- : : "a" (&S390_lowcore.floating_pt_save_area));
+ asm volatile(
+ " ld 0,0(%0)\n"
+ " ld 2,8(%0)\n"
+ " ld 4,16(%0)\n"
+ " ld 6,24(%0)"
+ : : "a" (&S390_lowcore.floating_pt_save_area));
#endif
if (MACHINE_HAS_IEEE) {
@@ -274,37 +275,36 @@ s390_revalidate_registers(struct mci *mci)
* Floating point control register can't be restored.
* Task will be terminated.
*/
- asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+ asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
kill_task = 1;
- }
- else
- asm volatile (
- "lfpc 0(%0)"
- : : "a" (fpt_creg_save_area));
-
- asm volatile("ld 0,0(%0)\n"
- "ld 1,8(%0)\n"
- "ld 2,16(%0)\n"
- "ld 3,24(%0)\n"
- "ld 4,32(%0)\n"
- "ld 5,40(%0)\n"
- "ld 6,48(%0)\n"
- "ld 7,56(%0)\n"
- "ld 8,64(%0)\n"
- "ld 9,72(%0)\n"
- "ld 10,80(%0)\n"
- "ld 11,88(%0)\n"
- "ld 12,96(%0)\n"
- "ld 13,104(%0)\n"
- "ld 14,112(%0)\n"
- "ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
+ } else
+ asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+
+ asm volatile(
+ " ld 0,0(%0)\n"
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ : : "a" (fpt_save_area));
}
/* Revalidate access registers */
- asm volatile("lam 0,15,0(%0)"
- : : "a" (&S390_lowcore.access_regs_save_area));
+ asm volatile(
+ " lam 0,15,0(%0)"
+ : : "a" (&S390_lowcore.access_regs_save_area));
if (!mci->ar)
/*
* Access registers have unknown contents.
@@ -321,11 +321,13 @@ s390_revalidate_registers(struct mci *mci)
s390_handle_damage("invalid control registers.");
else
#ifdef CONFIG_64BIT
- asm volatile("lctlg 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
+ asm volatile(
+ " lctlg 0,15,0(%0)"
+ : : "a" (&S390_lowcore.cregs_save_area));
#else
- asm volatile("lctl 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
+ asm volatile(
+ " lctl 0,15,0(%0)"
+ : : "a" (&S390_lowcore.cregs_save_area));
#endif
/*
@@ -339,20 +341,23 @@ s390_revalidate_registers(struct mci *mci)
* old contents (should be zero) otherwise set it to zero.
*/
if (!mci->pr)
- asm volatile("sr 0,0\n"
- "sckpf"
- : : : "0", "cc");
+ asm volatile(
+ " sr 0,0\n"
+ " sckpf"
+ : : : "0", "cc");
else
asm volatile(
- "l 0,0(%0)\n"
- "sckpf"
- : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc");
+ " l 0,0(%0)\n"
+ " sckpf"
+ : : "a" (&S390_lowcore.tod_progreg_save_area)
+ : "0", "cc");
#endif
/* Revalidate clock comparator register */
- asm volatile ("stck 0(%1)\n"
- "sckc 0(%1)"
- : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
+ asm volatile(
+ " stck 0(%1)\n"
+ " sckc 0(%1)"
+ : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
/* Check if old PSW is valid */
if (!mci->wp)
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 8f882690994..32933ed54b8 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -107,6 +107,10 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
(ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
/* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
/* max. number of (data buffer) SBALEs in largest SBAL chain
multiplied by the number of sectors per 4k block */
+
/* FIXME(tune): free space should be one max. SBAL chain plus what? */
#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
- (ZFCP_MAX_SBALS_PER_REQ + 4))
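
The new ZFCP_MAX_SECTORS limit follows the comment above: assuming each data SBALE describes at most one 4 KiB block and the usual 512-byte sector size used by the SCSI midlayer, one SBALE covers 4096 / 512 = 8 sectors, hence ZFCP_MAX_SECTORS = ZFCP_MAX_SBALES_PER_REQ * 8. This is the per-command cap that the new .max_sectors entry in the zfcp scsi_host_template further down advertises to the block layer.
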
@@ -1028,9 +1032,9 @@ struct zfcp_data {
wwn_t init_wwpn;
fcp_lun_t init_fcp_lun;
char *driver_version;
- kmem_cache_t *fsf_req_qtcb_cache;
- kmem_cache_t *sr_buffer_cache;
- kmem_cache_t *gid_pn_cache;
+ struct kmem_cache *fsf_req_qtcb_cache;
+ struct kmem_cache *sr_buffer_cache;
+ struct kmem_cache *gid_pn_cache;
};
/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 862a411a4aa..c88babce9bc 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1987,7 +1987,7 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
sbale = &(adapter->response_queue.buffer[i]->element[0]);
sbale->length = 0;
sbale->flags = SBAL_FLAGS_LAST_ENTRY;
- sbale->addr = 0;
+ sbale->addr = NULL;
}
ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 277826cdd0c..067f1519eb0 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -109,7 +109,7 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
ptr = kmalloc(size, GFP_ATOMIC);
else
ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
- SLAB_ATOMIC);
+ GFP_ATOMIC);
}
if (unlikely(!ptr))
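
The zfcp hunks above reflect two tree-wide slab cleanups: the kmem_cache_t typedef gives way to struct kmem_cache, and allocation flags are plain GFP_* values rather than SLAB_*. A hedged sketch; the cache is assumed to have been created elsewhere at init time:

#include <linux/slab.h>

/* Opaque cache pointer using the struct type directly, not kmem_cache_t;
 * created once with kmem_cache_create() during module init (not shown). */
static struct kmem_cache *ex_req_cache;

static void *ex_alloc_req_atomic(void)
{
	/* GFP_ATOMIC replaces the old SLAB_ATOMIC spelling. */
	return kmem_cache_alloc(ex_req_cache, GFP_ATOMIC);
}

static void ex_free_req(void *req)
{
	kmem_cache_free(ex_req_cache, req);
}
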
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7cafa34e4c7..452d96f92a1 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -58,6 +58,7 @@ struct zfcp_data zfcp_data = {
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
+ .max_sectors = ZFCP_MAX_SECTORS,
},
.driver_version = ZFCP_VERSION,
};
@@ -301,7 +302,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
int use_timer)
{
int ret;
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
scpnt->scsi_done = zfcp_scsi_command_sync_handler;
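
DECLARE_COMPLETION_ONSTACK() is the variant intended for completions that live on the stack, as the synchronous SCSI path here does; the waiter blocks until the completion-side handler calls complete(). A minimal sketch with an invented asynchronous starter:

#include <linux/completion.h>

static void ex_wait_for_async(void (*start_async)(struct completion *))
{
	/* On-stack completion: the ONSTACK form keeps lockdep happy for
	 * automatic variables. */
	DECLARE_COMPLETION_ONSTACK(done);

	start_async(&done);		/* async side keeps &done ...        */
	wait_for_completion(&done);	/* ... and later calls complete()    */
}
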