author    David Howells <dhowells@redhat.com>    2006-12-05 17:01:28 +0000
committer David Howells <dhowells@warthog.cambridge.redhat.com>    2006-12-05 17:01:28 +0000
commit    9db73724453a9350e1c22dbe732d427e2939a5c9 (patch)
tree      15e3ead6413ae97398a54292acc199bee0864d42 /drivers/s390
parent    4c1ac1b49122b805adfa4efc620592f68dccf5db (diff)
parent    e62438630ca37539c8cc1553710bbfaa3cf960a7 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/ata/libata-scsi.c
	include/linux/libata.h

Further merge of Linus's head and compilation fixups.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c            16
-rw-r--r--  drivers/s390/block/dasd_devmap.c     36
-rw-r--r--  drivers/s390/char/con3215.c          50
-rw-r--r--  drivers/s390/char/sclp_quiesce.c     37
-rw-r--r--  drivers/s390/cio/chsc.c              82
-rw-r--r--  drivers/s390/cio/cio.c              128
-rw-r--r--  drivers/s390/cio/css.h                3
-rw-r--r--  drivers/s390/cio/device.c            17
-rw-r--r--  drivers/s390/cio/device_fsm.c        27
-rw-r--r--  drivers/s390/cio/device_id.c         10
-rw-r--r--  drivers/s390/cio/device_pgid.c       30
-rw-r--r--  drivers/s390/cio/device_status.c      3
-rw-r--r--  drivers/s390/cio/qdio.c               8
-rw-r--r--  drivers/s390/cio/qdio.h               4
-rw-r--r--  drivers/s390/crypto/ap_bus.c         10
-rw-r--r--  drivers/s390/net/lcs.c               78
-rw-r--r--  drivers/s390/net/lcs.h               25
-rw-r--r--  drivers/s390/net/qeth.h               2
-rw-r--r--  drivers/s390/net/qeth_main.c          6
19 files changed, 350 insertions(+), 222 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 79ffef6bfaf..a2cef57d7bc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1264,15 +1264,21 @@ __dasd_check_expire(struct dasd_device * device)
if (list_empty(&device->ccw_queue))
return;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
- if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
- if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+ if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+ (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ if (device->discipline->term_IO(cqr) != 0) {
+ /* Hmpf, try again in 5 sec */
+ dasd_set_timer(device, 5*HZ);
+ DEV_MESSAGE(KERN_ERR, device,
+ "internal error - timeout (%is) expired "
+ "for cqr %p, termination failed, "
+ "retrying in 5s",
+ (cqr->expires/HZ), cqr);
+ } else {
DEV_MESSAGE(KERN_ERR, device,
"internal error - timeout (%is) expired "
"for cqr %p (%i retries left)",
(cqr->expires/HZ), cqr, cqr->retries);
- if (device->discipline->term_IO(cqr) != 0)
- /* Hmpf, try again in 1/10 sec */
- dasd_set_timer(device, 10);
}
}
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 91cf971f065..17fdd8c9f74 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -684,21 +684,26 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
- int ro_flag;
+ int val;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- ro_flag = buf[0] == '1';
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
spin_lock(&dasd_devmap_lock);
- if (ro_flag)
+ if (val)
devmap->features |= DASD_FEATURE_READONLY;
else
devmap->features &= ~DASD_FEATURE_READONLY;
if (devmap->device)
devmap->device->features = devmap->features;
if (devmap->device && devmap->device->gdp)
- set_disk_ro(devmap->device->gdp, ro_flag);
+ set_disk_ro(devmap->device->gdp, val);
spin_unlock(&dasd_devmap_lock);
return count;
}
@@ -729,17 +734,22 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
{
struct dasd_devmap *devmap;
ssize_t rc;
- int use_diag;
+ int val;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- use_diag = buf[0] == '1';
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device) {
- if (use_diag)
+ if (val)
devmap->features |= DASD_FEATURE_USEDIAG;
else
devmap->features &= ~DASD_FEATURE_USEDIAG;
@@ -854,14 +864,20 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
- int rc;
+ int val, rc;
+ char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if (!devmap->device)
- return count;
- if (buf[0] == '1') {
+ return -ENODEV;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val > 1))
+ return -EINVAL;
+
+ if (val) {
rc = dasd_eer_enable(devmap->device);
if (rc)
return rc;
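
[Editor's note] The dasd_devmap.c hunks above give dasd_ro_store(), dasd_use_diag_store() and dasd_eer_store() the same strict parse-and-validate pattern: simple_strtoul(), then a check that at most one trailing character (the newline echoed by sysfs writes) follows the number and that the value is 0 or 1. A minimal user-space sketch of the equivalent check, assuming a hypothetical helper name (parse_bool_strict is not part of the patch):

/* Sketch only: mirrors the endp/count/val check added above; not kernel code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_bool_strict(const char *buf, size_t count, int *val)
{
    char *endp;
    unsigned long v;

    v = strtoul(buf, &endp, 0);
    /* allow at most one trailing character (the '\n' from "echo") */
    if ((endp + 1) < (buf + count) || v > 1)
        return -EINVAL;
    *val = (int)v;
    return 0;
}

int main(void)
{
    const char *inputs[] = { "1\n", "0", "2\n", "yes\n" };

    for (int i = 0; i < 4; i++) {
        int val = -1;
        int rc = parse_bool_strict(inputs[i], strlen(inputs[i]), &val);
        printf("input %d: rc=%d val=%d\n", i, rc, val);
    }
    return 0;
}

Inputs "1\n" and "0" pass; "2\n" and "yes\n" are rejected with -EINVAL, which is the behaviour the patch adds for the sysfs attributes.
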
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index d7de175d53f..c9321b920e9 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -299,14 +299,14 @@ raw3215_timeout(unsigned long __data)
struct raw3215_info *raw = (struct raw3215_info *) __data;
unsigned long flags;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (raw->flags & RAW3215_TIMER_RUNS) {
del_timer(&raw->timer);
raw->flags &= ~RAW3215_TIMER_RUNS;
raw3215_mk_write_req(raw);
raw3215_start_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -355,10 +355,10 @@ raw3215_tasklet(void *data)
unsigned long flags;
raw = (struct raw3215_info *) data;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
/* Check for pending message from raw3215_irq */
if (raw->message != NULL) {
printk(raw->message, raw->msg_dstat, raw->msg_cstat);
@@ -512,9 +512,9 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length)
if (RAW3215_BUFFER_SIZE - raw->count >= length)
break;
/* there might be another cpu waiting for the lock */
- spin_unlock(raw->lock);
+ spin_unlock(get_ccwdev_lock(raw->cdev));
udelay(100);
- spin_lock(raw->lock);
+ spin_lock(get_ccwdev_lock(raw->cdev));
}
}
@@ -528,7 +528,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
int c, count;
while (length > 0) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
count = (length > RAW3215_BUFFER_SIZE) ?
RAW3215_BUFFER_SIZE : length;
length -= count;
@@ -555,7 +555,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
/* start or queue request */
raw3215_try_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
@@ -568,7 +568,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
unsigned long flags;
unsigned int length, i;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (ch == '\t') {
length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
raw->line_pos += length;
@@ -592,7 +592,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
/* start or queue request */
raw3215_try_io(raw);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -604,13 +604,13 @@ raw3215_flush_buffer(struct raw3215_info *raw)
{
unsigned long flags;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (raw->count > 0) {
raw->flags |= RAW3215_FLUSHING;
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
@@ -625,9 +625,9 @@ raw3215_startup(struct raw3215_info *raw)
return 0;
raw->line_pos = 0;
raw->flags |= RAW3215_ACTIVE;
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
return 0;
}
@@ -644,21 +644,21 @@ raw3215_shutdown(struct raw3215_info *raw)
if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if ((raw->flags & RAW3215_WORKING) ||
raw->queued_write != NULL ||
raw->queued_read != NULL) {
raw->flags |= RAW3215_CLOSING;
add_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
schedule();
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
}
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
static int
@@ -686,7 +686,6 @@ raw3215_probe (struct ccw_device *cdev)
}
raw->cdev = cdev;
- raw->lock = get_ccwdev_lock(cdev);
raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
memset(raw, 0, sizeof(struct raw3215_info));
raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
@@ -809,9 +808,9 @@ con3215_unblank(void)
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
static int __init
@@ -873,7 +872,6 @@ con3215_init(void)
raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
raw->cdev = cdev;
- raw->lock = get_ccwdev_lock(cdev);
cdev->dev.driver_data = raw;
cdev->handler = raw3215_irq;
@@ -1066,10 +1064,10 @@ tty3215_unthrottle(struct tty_struct * tty)
raw = (struct raw3215_info *) tty->driver_data;
if (raw->flags & RAW3215_THROTTLED) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_THROTTLED;
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
@@ -1096,10 +1094,10 @@ tty3215_start(struct tty_struct *tty)
raw = (struct raw3215_info *) tty->driver_data;
if (raw->flags & RAW3215_STOPPED) {
- spin_lock_irqsave(raw->lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_STOPPED;
raw3215_try_io(raw);
- spin_unlock_irqrestore(raw->lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
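
[Editor's note] The con3215.c hunks above drop the cached raw->lock pointer and always derive the lock from the owning ccw device via get_ccwdev_lock(raw->cdev). A rough user-space analogue of that accessor pattern, using pthreads (struct and function names are illustrative, not the driver's):

/* Illustrative only: the lock lives in the device object and callers
 * fetch it through an accessor each time instead of caching a copy. */
#include <pthread.h>
#include <stdio.h>

struct fake_ccw_device {
    pthread_mutex_t lock;
};

struct fake_raw3215_info {
    struct fake_ccw_device *cdev;
    int count;
};

static pthread_mutex_t *fake_get_ccwdev_lock(struct fake_ccw_device *cdev)
{
    return &cdev->lock;
}

static void fake_write(struct fake_raw3215_info *raw)
{
    pthread_mutex_lock(fake_get_ccwdev_lock(raw->cdev));
    raw->count++;        /* critical section */
    pthread_mutex_unlock(fake_get_ccwdev_lock(raw->cdev));
}

int main(void)
{
    struct fake_ccw_device dev = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct fake_raw3215_info raw = { .cdev = &dev, .count = 0 };

    fake_write(&raw);
    printf("count=%d\n", raw.count);
    return 0;
}
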
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 32004aae95c..ffa9282ce97 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -19,52 +19,17 @@
#include "sclp.h"
-
-#ifdef CONFIG_SMP
-/* Signal completion of shutdown process. All CPUs except the first to enter
- * this function: go to stopped state. First CPU: wait until all other
- * CPUs are in stopped or check stop state. Afterwards, load special PSW
- * to indicate completion. */
-static void
-do_load_quiesce_psw(void * __unused)
-{
- static atomic_t cpuid = ATOMIC_INIT(-1);
- psw_t quiesce_psw;
- int cpu;
-
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
- signal_processor(smp_processor_id(), sigp_stop);
- /* Wait for all other cpus to enter stopped state */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- while(!smp_cpu_not_running(cpu))
- cpu_relax();
- }
- /* Quiesce the last cpu with the special psw */
- quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
- quiesce_psw.addr = 0xfff;
- __load_psw(quiesce_psw);
-}
-
-/* Shutdown handler. Perform shutdown function on all CPUs. */
-static void
-do_machine_quiesce(void)
-{
- on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
-}
-#else
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void
do_machine_quiesce(void)
{
psw_t quiesce_psw;
+ smp_send_stop();
quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
-#endif
/* Handler for quiesce event. Start shutdown procedure. */
static void
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 2d78f0f4a40..dbfb77b0392 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -251,6 +251,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
cc = cio_clear(sch);
if (cc == -ENODEV)
goto out_unreg;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
/* Call handler. */
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
@@ -711,9 +713,6 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
int cc;
- if (!device_is_online(sch))
- /* cio could be doing I/O. */
- return 0;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
@@ -722,6 +721,26 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
return 0;
}
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0) {
+ if(css_enqueue_subchannel_slow(sch->schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ }
+ return;
+ }
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+}
+
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
@@ -744,20 +763,26 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- } else {
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- if (check_for_io_on_path(sch, chp))
+ break;
+ }
+ sch->opm &= ~(0x80 >> chp);
+ sch->lpm &= ~(0x80 >> chp);
+ if (check_for_io_on_path(sch, chp)) {
+ if (device_is_online(sch))
/* Path verification is done after killing. */
device_kill_io(sch);
- else if (!sch->lpm) {
+ else
+ /* Kill and retry internal I/O. */
+ terminate_internal_io(sch);
+ } else if (!sch->lpm) {
+ if (device_trigger_verify(sch) != 0) {
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
- } else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- }
+ }
+ } else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
break;
}
spin_unlock_irqrestore(&sch->lock, flags);
@@ -1465,41 +1490,6 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
return desc;
}
-static int reset_channel_path(struct channel_path *chp)
-{
- int cc;
-
- cc = rchp(chp->id);
- switch (cc) {
- case 0:
- return 0;
- case 2:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-
-static void reset_channel_paths_css(struct channel_subsystem *css)
-{
- int i;
-
- for (i = 0; i <= __MAX_CHPID; i++) {
- if (css->chps[i])
- reset_channel_path(css->chps[i]);
- }
-}
-
-void cio_reset_channel_paths(void)
-{
- int i;
-
- for (i = 0; i <= __MAX_CSSID; i++) {
- if (css[i] && css[i]->valid)
- reset_channel_paths_css(css[i]);
- }
-}
-
static int __init
chsc_alloc_sei_area(void)
{
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8936e460a80..20aee278384 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -21,6 +21,7 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
+#include <asm/reset.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
@@ -28,6 +29,7 @@
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
+#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
@@ -841,26 +843,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
return -EBUSY;
}
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
- void *data)
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
struct schib schib;
- struct sch_match_id *match_id = data;
if (stsch_err(schid, &schib))
return -ENXIO;
- if (match_id && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- }
if (!schib.pmcw.ena)
return 0;
switch(__disable_subchannel_easy(schid, &schib)) {
@@ -876,27 +864,111 @@ static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
return 0;
}
-static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+ struct crw crw;
+ struct mci *mci;
+
+ /* Check for pending channel report word. */
+ mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+ if (!mci->cp)
+ return;
+ /* Process channel report words. */
+ while (stcrw(&crw) == 0) {
+ /* Check for responses to RCHP. */
+ if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+ atomic_dec(&chpid_reset_count);
+ }
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+ int i, ret;
+ unsigned long long timeout;
+
+ /* Reset subchannels. */
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
+ /* Reset channel paths. */
+ s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
+ /* Enable channel report machine checks. */
+ __ctl_set_bit(14, 28);
+ /* Temporarily reenable machine checks. */
+ local_mcck_enable();
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ ret = rchp(i);
+ if ((ret == 0) || (ret == 2))
+ /*
+ * rchp either succeeded, or another rchp is already
+ * in progress. In either case, we'll get a crw.
+ */
+ atomic_inc(&chpid_reset_count);
+ }
+ /* Wait for machine check for all channel paths. */
+ timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ while (atomic_read(&chpid_reset_count) != 0) {
+ if (get_clock() > timeout)
+ break;
+ cpu_relax();
+ }
+ /* Disable machine checks again. */
+ local_mcck_disable();
+ /* Disable channel report machine checks. */
+ __ctl_clear_bit(14, 28);
+ s390_reset_mcck_handler = NULL;
+}
+
+static struct reset_call css_reset_call = {
+ .fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+ atomic_set(&chpid_reset_count, 0);
+ register_reset_call(&css_reset_call);
+ return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+ struct subchannel_id schid;
+ struct ccw_dev_id devid;
+ int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct sch_match_id *match_id = data;
+
+ if (stsch_err(schid, &schib))
+ return -ENXIO;
+ if (schib.pmcw.dnv &&
+ (schib.pmcw.dev == match_id->devid.devno) &&
+ (schid.ssid == match_id->devid.ssid)) {
+ match_id->schid = schid;
+ match_id->rc = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+ struct subchannel_id *schid)
{
struct sch_match_id match_id;
match_id.devid = *devid;
match_id.rc = -ENODEV;
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+ for_each_subchannel(__reipl_subchannel_match, &match_id);
if (match_id.rc == 0)
*schid = match_id.schid;
return match_id.rc;
}
-
-void clear_all_subchannels(void)
-{
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
-}
-
extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
@@ -904,9 +976,9 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
{
struct subchannel_id schid;
- if (clear_all_subchannels_and_match(devid, &schid))
+ s390_reset_system();
+ if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
- cio_reset_channel_paths();
do_reipl_asm(*((__u32*)&schid));
}
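
[Editor's note] css_reset() above issues rchp() for every channel path, counts the expected channel report words in an atomic counter, and then waits until the counter drains or RCHP_TIMEOUT passes. A user-space sketch of that deadline-bounded drain loop with C11 atomics (the clock source and all names are placeholders, not the kernel API):

/* Sketch only: wait for an atomic counter to reach zero, bounded by a
 * deadline, in the spirit of the RCHP_TIMEOUT loop in css_reset(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_int pending;

static unsigned long long now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static bool wait_for_drain(unsigned long long timeout_ns)
{
    unsigned long long deadline = now_ns() + timeout_ns;

    while (atomic_load(&pending) != 0) {
        if (now_ns() > deadline)
            return false;    /* give up, as css_reset() does */
    }
    return true;
}

int main(void)
{
    atomic_store(&pending, 3);
    /* pretend the three expected "machine checks" already arrived */
    atomic_fetch_sub(&pending, 3);
    printf("drained=%d\n", wait_for_drain(1000000ULL));
    return 0;
}
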
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4c2ff833628..9ff064e7176 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -94,6 +94,7 @@ struct ccw_device_private {
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int intretry:1; /* retry internal operation */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -171,6 +172,8 @@ void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
int device_is_online(struct subchannel *);
void device_kill_io(struct subchannel *);
+void device_set_intretry(struct subchannel *sch);
+int device_trigger_verify(struct subchannel *sch);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 39c98f94050..d3d3716ff84 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -687,8 +687,20 @@ io_subchannel_register(void *data)
cdev = data;
sch = to_subchannel(cdev->dev.parent);
+ /*
+ * io_subchannel_register() will also be called after device
+ * recognition has been done for a boxed device (which will already
+ * be registered). We need to reprobe since we may now have sense id
+ * information.
+ */
if (klist_node_attached(&cdev->dev.knode_parent)) {
- bus_rescan_devices(&ccw_bus_type);
+ if (!cdev->drv) {
+ ret = device_reprobe(&cdev->dev);
+ if (ret)
+ /* We can't do much here. */
+ dev_info(&cdev->dev, "device_reprobe() returned"
+ " %d\n", ret);
+ }
goto out;
}
/* make it known to the system */
@@ -948,6 +960,9 @@ io_subchannel_ioterm(struct device *dev)
cdev = dev->driver_data;
if (!cdev)
return;
+ /* Internal I/O will be retried by the interrupt handler. */
+ if (cdev->private->flags.intretry)
+ return;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index de3d0857db9..09c7672eb3f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,6 +59,27 @@ device_set_disconnected(struct subchannel *sch)
cdev->private->state = DEV_STATE_DISCONNECTED;
}
+void device_set_intretry(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch->dev.driver_data;
+ if (!cdev)
+ return;
+ cdev->private->flags.intretry = 1;
+}
+
+int device_trigger_verify(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch->dev.driver_data;
+ if (!cdev || !cdev->online)
+ return -EINVAL;
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return 0;
+}
+
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
@@ -893,6 +914,12 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
* had killed the original request.
*/
if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ /* Retry Basic Sense if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ ccw_device_do_sense(cdev, irb);
+ return;
+ }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
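
[Editor's note] device_set_intretry() and the w4sense hunk above implement a retry-once flag: when internal I/O is terminated by a clear or halt, the flag is set and the interrupt path restarts the operation exactly once, clearing the flag as it does. A compact sketch of that flag discipline (names are made up for illustration):

/* Sketch only: a one-shot retry flag consumed by the handler. */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
    bool intretry;        /* set when internal I/O was terminated */
};

static void request_retry(struct fake_dev *dev)
{
    dev->intretry = true;
}

/* Returns true if the interrupted operation should be restarted. */
static bool consume_retry(struct fake_dev *dev)
{
    if (!dev->intretry)
        return false;
    dev->intretry = false;    /* retry at most once */
    return true;
}

int main(void)
{
    struct fake_dev dev = { .intretry = false };

    request_retry(&dev);
    printf("first check:  retry=%d\n", consume_retry(&dev));
    printf("second check: retry=%d\n", consume_retry(&dev));
    return 0;
}
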
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index a74785b9e4e..f17275917fe 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -191,6 +191,8 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -237,8 +239,14 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
return 0; /* Success */
}
/* Check the error cases. */
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense ID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
/*
* if the device doesn't support the SenseID
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 2975ce888c1..cb1879a9681 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -71,6 +71,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -122,8 +124,14 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons &&
(irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
/*
@@ -253,6 +261,8 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -293,6 +303,8 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -321,8 +333,14 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Set PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
@@ -360,8 +378,14 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry NOP if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 3f7cbce4cd8..bdcf930f7be 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -319,6 +319,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch->sense_ccw.count = SENSE_MAX_COUNT;
sch->sense_ccw.flags = CCW_FLAG_SLI;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
+
return cio_start (sch, &sch->sense_ccw, 0xff);
}
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 476aa1da5cb..8d5fa1b4d11 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -481,7 +481,7 @@ qdio_stop_polling(struct qdio_q *q)
unsigned char state = 0;
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
- if (!atomic_swap(&q->polling,0))
+ if (!atomic_xchg(&q->polling,0))
return 1;
QDIO_DBF_TEXT4(0,trace,"stoppoll");
@@ -1964,8 +1964,8 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
QDIO_PRINT_WARN("sense data available on qdio channel.\n");
- HEXDUMP16(WARN,"irb: ",irb);
- HEXDUMP16(WARN,"sense data: ",irb->ecw);
+ QDIO_HEXDUMP16(WARN,"irb: ",irb);
+ QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
}
}
@@ -3425,7 +3425,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
(callflags&QDIO_FLAG_UNDER_INTERRUPT))
- atomic_swap(&q->polling,0);
+ atomic_xchg(&q->polling,0);
if (used_elements)
return;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 49bb9e371c3..42927c1b745 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -236,7 +236,7 @@ enum qdio_irq_states {
#define QDIO_PRINT_EMERG(x...) do { } while (0)
#endif
-#define HEXDUMP16(importance,header,ptr) \
+#define QDIO_HEXDUMP16(importance,header,ptr) \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
@@ -429,8 +429,6 @@ struct qdio_perf_stats {
};
#endif /* QDIO_PERFORMANCE_STATS */
-#define atomic_swap(a,b) xchg((int*)a.counter,b)
-
/* unlikely as the later the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
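
[Editor's note] The qdio.h hunk above removes the driver-private atomic_swap() macro; the qdio.c hunks before it and the qeth hunks further down switch the callers to the generic atomic_xchg(), which atomically stores a new value and returns the old one. A user-space analogue of that operation with C11 atomics (a sketch, not kernel code):

/* Sketch only: atomically swap in a new value and get the old one. */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int polling = 1;

    /* take the old value and clear the flag in one atomic step */
    int was_polling = atomic_exchange(&polling, 0);

    printf("was_polling=%d now=%d\n", was_polling, atomic_load(&polling));
    return 0;
}
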
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 79d89c36891..6a54334ffe0 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -431,7 +431,15 @@ static int ap_uevent (struct device *dev, char **envp, int num_envp,
ap_dev->device_type);
if (buffer_size - length <= 0)
return -ENOMEM;
- envp[1] = 0;
+ buffer += length;
+ buffer_size -= length;
+ /* Add MODALIAS= */
+ envp[1] = buffer;
+ length = scnprintf(buffer, buffer_size, "MODALIAS=ap:t%02X",
+ ap_dev->device_type);
+ if (buffer_size - length <= 0)
+ return -ENOMEM;
+ envp[2] = NULL;
return 0;
}
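
[Editor's note] The ap_bus.c hunk above extends ap_uevent() so that, after the DEV_TYPE= entry, it appends a MODALIAS= variable to the shared buffer and NULL-terminates the envp array. A rough user-space sketch of that buffer/envp bookkeeping (values and the helper are hypothetical; this is not the driver-core API):

/* Sketch only: pack two environment strings into one buffer and record
 * pointers to them in a NULL-terminated envp array. */
#include <stdio.h>

int main(void)
{
    char buffer[64];
    char *envp[3];
    size_t size = sizeof(buffer);
    char *p = buffer;
    int length;
    unsigned int device_type = 0x0a;    /* made-up device type */

    envp[0] = p;
    length = snprintf(p, size, "DEV_TYPE=%04X", device_type);
    p += length + 1;            /* keep the terminating NUL */
    size -= length + 1;

    envp[1] = p;
    length = snprintf(p, size, "MODALIAS=ap:t%02X", device_type);
    envp[2] = NULL;

    for (int i = 0; envp[i]; i++)
        printf("envp[%d] = %s\n", i, envp[i]);
    return 0;
}
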
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 66a8aec6efa..08d4e47070b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -54,6 +54,8 @@
#error Cannot compile lcs.c without some net devices switched on.
#endif
+#define PRINTK_HEADER " lcs: "
+
/**
* initialization string for output
*/
@@ -120,7 +122,7 @@ lcs_alloc_channel(struct lcs_channel *channel)
kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
- channel->iob[cnt].state = BUF_STATE_EMPTY;
+ channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
}
if (cnt < LCS_NUM_BUFFS) {
/* Not all io buffers could be allocated. */
@@ -236,7 +238,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
- card->read.iob[cnt].state = BUF_STATE_READY;
+ card->read.iob[cnt].state = LCS_BUF_STATE_READY;
card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
}
card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
@@ -247,7 +249,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
card->read.ccws[LCS_NUM_BUFFS].cda =
(__u32) __pa(card->read.ccws);
/* Setg initial state of the read channel. */
- card->read.state = CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
card->read.io_idx = 0;
card->read.buf_idx = 0;
@@ -294,7 +296,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
card->write.ccws[LCS_NUM_BUFFS].cda =
(__u32) __pa(card->write.ccws);
/* Set initial state of the write channel. */
- card->read.state = CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
card->write.io_idx = 0;
card->write.buf_idx = 0;
@@ -496,7 +498,7 @@ lcs_start_channel(struct lcs_channel *channel)
channel->ccws + channel->io_idx, 0, 0,
DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
if (rc == 0)
- channel->state = CH_STATE_RUNNING;
+ channel->state = LCS_CH_STATE_RUNNING;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
@@ -520,8 +522,8 @@ lcs_clear_channel(struct lcs_channel *channel)
LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
return rc;
}
- wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
- channel->state = CH_STATE_STOPPED;
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+ channel->state = LCS_CH_STATE_STOPPED;
return rc;
}
@@ -535,11 +537,11 @@ lcs_stop_channel(struct lcs_channel *channel)
unsigned long flags;
int rc;
- if (channel->state == CH_STATE_STOPPED)
+ if (channel->state == LCS_CH_STATE_STOPPED)
return 0;
LCS_DBF_TEXT(4,trace,"haltsch");
LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
- channel->state = CH_STATE_INIT;
+ channel->state = LCS_CH_STATE_INIT;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -548,7 +550,7 @@ lcs_stop_channel(struct lcs_channel *channel)
return rc;
}
/* Asynchronous halt initialted. Wait for its completion. */
- wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
lcs_clear_channel(channel);
return 0;
}
@@ -596,8 +598,8 @@ __lcs_get_buffer(struct lcs_channel *channel)
LCS_DBF_TEXT(5, trace, "_getbuff");
index = channel->io_idx;
do {
- if (channel->iob[index].state == BUF_STATE_EMPTY) {
- channel->iob[index].state = BUF_STATE_LOCKED;
+ if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+ channel->iob[index].state = LCS_BUF_STATE_LOCKED;
return channel->iob + index;
}
index = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -626,7 +628,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
{
int rc;
- if (channel->state != CH_STATE_SUSPENDED)
+ if (channel->state != LCS_CH_STATE_SUSPENDED)
return 0;
if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
return 0;
@@ -636,7 +638,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
} else
- channel->state = CH_STATE_RUNNING;
+ channel->state = LCS_CH_STATE_RUNNING;
return rc;
}
@@ -670,10 +672,10 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff");
- BUG_ON(buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED);
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = BUF_STATE_READY;
+ buffer->state = LCS_BUF_STATE_READY;
index = buffer - channel->iob;
/* Set length. */
channel->ccws[index].count = buffer->count;
@@ -695,8 +697,8 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff");
- BUG_ON(buffer->state != BUF_STATE_READY);
- buffer->state = BUF_STATE_PROCESSED;
+ BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+ buffer->state = LCS_BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
next = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -704,7 +706,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
channel->ccws[index].flags &= ~CCW_FLAG_PCI;
/* Check the suspend bit of the previous buffer. */
- if (channel->iob[prev].state == BUF_STATE_READY) {
+ if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
/*
* Previous buffer is in state ready. It might have
* happened in lcs_ready_buffer that the suspend bit
@@ -727,10 +729,10 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff");
- BUG_ON(buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED);
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = BUF_STATE_EMPTY;
+ buffer->state = LCS_BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}
@@ -1264,7 +1266,7 @@ lcs_register_mc_addresses(void *data)
netif_carrier_off(card->dev);
netif_tx_disable(card->dev);
wait_event(card->write.wait_q,
- (card->write.state != CH_STATE_RUNNING));
+ (card->write.state != LCS_CH_STATE_RUNNING));
lcs_fix_multicast_list(card);
if (card->state == DEV_STATE_UP) {
netif_carrier_on(card->dev);
@@ -1404,7 +1406,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
}
/* How far in the ccw chain have we processed? */
- if ((channel->state != CH_STATE_INIT) &&
+ if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
- channel->ccws;
@@ -1424,20 +1426,20 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
(irb->scsw.dstat & DEV_STAT_CHN_END) ||
(irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
/* Mark channel as stopped. */
- channel->state = CH_STATE_STOPPED;
+ channel->state = LCS_CH_STATE_STOPPED;
else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
/* CCW execution stopped on a suspend bit. */
- channel->state = CH_STATE_SUSPENDED;
+ channel->state = LCS_CH_STATE_SUSPENDED;
if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
if (irb->scsw.cc != 0) {
ccw_device_halt(channel->ccwdev, (addr_t) channel);
return;
}
/* The channel has been stopped by halt_IO. */
- channel->state = CH_STATE_HALTED;
+ channel->state = LCS_CH_STATE_HALTED;
}
if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
- channel->state = CH_STATE_CLEARED;
+ channel->state = LCS_CH_STATE_CLEARED;
}
/* Do the rest in the tasklet. */
tasklet_schedule(&channel->irq_tasklet);
@@ -1461,7 +1463,7 @@ lcs_tasklet(unsigned long data)
/* Check for processed buffers. */
iob = channel->iob;
buf_idx = channel->buf_idx;
- while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
+ while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
/* Do the callback thing. */
if (iob[buf_idx].callback != NULL)
iob[buf_idx].callback(channel, iob + buf_idx);
@@ -1469,12 +1471,12 @@ lcs_tasklet(unsigned long data)
}
channel->buf_idx = buf_idx;
- if (channel->state == CH_STATE_STOPPED)
+ if (channel->state == LCS_CH_STATE_STOPPED)
// FIXME: what if rc != 0 ??
rc = lcs_start_channel(channel);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- if (channel->state == CH_STATE_SUSPENDED &&
- channel->iob[channel->io_idx].state == BUF_STATE_READY) {
+ if (channel->state == LCS_CH_STATE_SUSPENDED &&
+ channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
// FIXME: what if rc != 0 ??
rc = __lcs_resume_channel(channel);
}
@@ -1689,8 +1691,8 @@ lcs_detect(struct lcs_card *card)
card->state = DEV_STATE_UP;
} else {
card->state = DEV_STATE_DOWN;
- card->write.state = CH_STATE_INIT;
- card->read.state = CH_STATE_INIT;
+ card->write.state = LCS_CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
}
return rc;
}
@@ -1705,8 +1707,8 @@ lcs_stopcard(struct lcs_card *card)
LCS_DBF_TEXT(3, setup, "stopcard");
- if (card->read.state != CH_STATE_STOPPED &&
- card->write.state != CH_STATE_STOPPED &&
+ if (card->read.state != LCS_CH_STATE_STOPPED &&
+ card->write.state != LCS_CH_STATE_STOPPED &&
card->state == DEV_STATE_UP) {
lcs_clear_multicast_list(card);
rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -1871,7 +1873,7 @@ lcs_stop_device(struct net_device *dev)
netif_tx_disable(dev);
dev->flags &= ~IFF_UP;
wait_event(card->write.wait_q,
- (card->write.state != CH_STATE_RUNNING));
+ (card->write.state != LCS_CH_STATE_RUNNING));
rc = lcs_stopcard(card);
if (rc)
PRINT_ERR("Try it again!\n ");
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index b5247dc08b5..0e1e4a0a88f 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -23,11 +23,6 @@ do { \
} while (0)
/**
- * some more definitions for debug or output stuff
- */
-#define PRINTK_HEADER " lcs: "
-
-/**
* sysfs related stuff
*/
#define CARD_FROM_DEV(cdev) \
@@ -127,22 +122,22 @@ do { \
* LCS Buffer states
*/
enum lcs_buffer_states {
- BUF_STATE_EMPTY, /* buffer is empty */
- BUF_STATE_LOCKED, /* buffer is locked, don't touch */
- BUF_STATE_READY, /* buffer is ready for read/write */
- BUF_STATE_PROCESSED,
+ LCS_BUF_STATE_EMPTY, /* buffer is empty */
+ LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
+ LCS_BUF_STATE_READY, /* buffer is ready for read/write */
+ LCS_BUF_STATE_PROCESSED,
};
/**
* LCS Channel State Machine declarations
*/
enum lcs_channel_states {
- CH_STATE_INIT,
- CH_STATE_HALTED,
- CH_STATE_STOPPED,
- CH_STATE_RUNNING,
- CH_STATE_SUSPENDED,
- CH_STATE_CLEARED,
+ LCS_CH_STATE_INIT,
+ LCS_CH_STATE_HALTED,
+ LCS_CH_STATE_STOPPED,
+ LCS_CH_STATE_RUNNING,
+ LCS_CH_STATE_SUSPENDED,
+ LCS_CH_STATE_CLEARED,
};
/**
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 821383d8cbe..53c358c7d36 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -151,8 +151,6 @@ qeth_hex_dump(unsigned char *buf, size_t len)
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
-#define atomic_swap(a,b) xchg((int *)a.counter, b)
-
/*
* Common IO related definitions
*/
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 8364d5475ac..7fdc5272c44 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2982,7 +2982,7 @@ qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)){
- if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+ if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
QETH_OUT_Q_UNLOCKED) {
/*
* If we get in here, there was no action in
@@ -3245,7 +3245,7 @@ qeth_free_qdio_buffers(struct qeth_card *card)
int i, j;
QETH_DBF_TEXT(trace, 2, "freeqdbf");
- if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
@@ -4366,7 +4366,7 @@ out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
- atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
+ atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/*
* queue->state will go from LOCKED -> UNLOCKED or from
* LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us