Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 237
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 47
-rw-r--r--  drivers/s390/block/dasd_alias.c | 77
-rw-r--r--  drivers/s390/block/dasd_diag.c | 20
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 170
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 4
-rw-r--r--  drivers/s390/block/dasd_eer.c | 5
-rw-r--r--  drivers/s390/block/dasd_fba.c | 11
-rw-r--r--  drivers/s390/block/dasd_int.h | 13
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 4
-rw-r--r--  drivers/s390/block/dasd_proc.c | 2
-rw-r--r--  drivers/s390/char/con3215.c | 1
-rw-r--r--  drivers/s390/char/con3270.c | 1
-rw-r--r--  drivers/s390/char/fs3270.c | 12
-rw-r--r--  drivers/s390/char/monreader.c | 8
-rw-r--r--  drivers/s390/char/monwriter.c | 7
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 1
-rw-r--r--  drivers/s390/char/tape.h | 9
-rw-r--r--  drivers/s390/char/tape_34xx.c | 8
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 17
-rw-r--r--  drivers/s390/char/tape_char.c | 54
-rw-r--r--  drivers/s390/char/tape_core.c | 65
-rw-r--r--  drivers/s390/char/tape_proc.c | 2
-rw-r--r--  drivers/s390/char/tty3270.c | 20
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 8
-rw-r--r--  drivers/s390/char/vmur.c | 3
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 29
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/ccwreq.c | 328
-rw-r--r--  drivers/s390/cio/chp.c | 2
-rw-r--r--  drivers/s390/cio/cio.c | 1
-rw-r--r--  drivers/s390/cio/cio.h | 8
-rw-r--r--  drivers/s390/cio/cmf.c | 2
-rw-r--r--  drivers/s390/cio/css.c | 57
-rw-r--r--  drivers/s390/cio/css.h | 3
-rw-r--r--  drivers/s390/cio/device.c | 1006
-rw-r--r--  drivers/s390/cio/device.h | 25
-rw-r--r--  drivers/s390/cio/device_fsm.c | 411
-rw-r--r--  drivers/s390/cio/device_id.c | 375
-rw-r--r--  drivers/s390/cio/device_ops.c | 142
-rw-r--r--  drivers/s390/cio/device_pgid.c | 963
-rw-r--r--  drivers/s390/cio/device_status.c | 3
-rw-r--r--  drivers/s390/cio/io_sch.h | 73
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 31
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 18
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 11
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 75
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 38
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 388
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 179
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 26
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 134
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 10
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 323
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 138
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 38
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 677
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 260
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 367
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 53
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 156
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 250
67 files changed, 3747 insertions, 3671 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aaccc8ecfa8..fdb2e7c1450 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,7 +24,6 @@
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
-#include <asm/todclk.h>
#include <asm/itcw.h>
/* This is ugly... */
@@ -64,6 +63,7 @@ static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
/*
* SECTION: Operations on the device structure.
@@ -960,7 +960,7 @@ static void dasd_device_timeout(unsigned long ptr)
device = (struct dasd_device *) ptr;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
- device->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_device_bh(device);
}
@@ -994,10 +994,9 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
return;
cqr = (struct dasd_ccw_req *) intparm;
if (cqr->status != DASD_CQR_IN_IO) {
- DBF_EVENT(DBF_DEBUG,
- "invalid status in handle_killed_request: "
- "bus_id %s, status %02x",
- dev_name(&cdev->dev), cqr->status);
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+ "invalid status in handle_killed_request: "
+ "%02x", cqr->status);
return;
}
@@ -1023,7 +1022,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
/* First of all start sense subsystem status request. */
dasd_eer_snss(device);
- device->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
@@ -1045,12 +1044,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
case -EIO:
break;
case -ETIMEDOUT:
- DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
- __func__, dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "request timed out\n", __func__);
break;
default:
- DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
- __func__, dev_name(&cdev->dev), PTR_ERR(irb));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "unknown error %ld\n", __func__,
+ PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
return;
@@ -1405,6 +1405,20 @@ void dasd_schedule_device_bh(struct dasd_device *device)
tasklet_hi_schedule(&device->tasklet);
}
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped &= ~bits;
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
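
These two helpers centralize all manipulation of device->stopped, so that clearing the last stop bit also wakes any thread sleeping on generic_waitq. A minimal sketch of the calling convention, distilled from the call sites converted by this patch (device->stopped is updated under the ccwdev lock):

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* ... later, once the blocking condition is gone ... */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* wakes _dasd_sleep_on() waiters if no stop bits remain */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);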
/*
* Queue a request to the head of the device ccw_queue.
* Start the I/O if possible.
@@ -1465,58 +1479,135 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
}
/*
- * Queue a request to the tail of the device ccw_queue and wait for
- * it's completion.
+ * Checks if error recovery is necessary; returns 1 if yes, 0 otherwise.
*/
-int dasd_sleep_on(struct dasd_ccw_req *cqr)
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
- int rc;
+ dasd_erp_fn_t erp_fn;
+ if (cqr->status == DASD_CQR_FILLED)
+ return 0;
device = cqr->startdev;
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->status == DASD_CQR_TERMINATED) {
+ device->discipline->handle_terminated_request(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_NEED_ERP) {
+ erp_fn = device->discipline->erp_action(cqr);
+ erp_fn(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_FAILED)
+ dasd_log_sense(cqr, &cqr->irb);
+ if (cqr->refers) {
+ __dasd_process_erp(device, cqr);
+ return 1;
+ }
+ }
+ return 0;
+}
- cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
- dasd_add_request_tail(cqr);
- wait_event(generic_waitq, _wait_for_wakeup(cqr));
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->refers) /* erp is not done yet */
+ return 1;
+ return ((cqr->status != DASD_CQR_DONE) &&
+ (cqr->status != DASD_CQR_FAILED));
+ } else
+ return (cqr->status == DASD_CQR_FILLED);
+}
- if (cqr->status == DASD_CQR_DONE)
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+ struct dasd_device *device;
+ int rc;
+ struct list_head ccw_queue;
+ struct dasd_ccw_req *cqr;
+
+ INIT_LIST_HEAD(&ccw_queue);
+ maincqr->status = DASD_CQR_FILLED;
+ device = maincqr->startdev;
+ list_add(&maincqr->blocklist, &ccw_queue);
+ for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
+ cqr = list_first_entry(&ccw_queue,
+ struct dasd_ccw_req, blocklist)) {
+
+ if (__dasd_sleep_on_erp(cqr))
+ continue;
+ if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+ continue;
+
+ /* Non-temporary stop condition will trigger fail fast */
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ (!dasd_eer_enabled(device))) {
+ cqr->status = DASD_CQR_FAILED;
+ continue;
+ }
+
+ /* Don't try to start requests if device is stopped */
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !(device->stopped));
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+
+ cqr->callback = dasd_wakeup_cb;
+ cqr->callback_data = (void *) &generic_waitq;
+ dasd_add_request_tail(cqr);
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, _wait_for_wakeup(cqr));
+ if (rc == -ERESTARTSYS) {
+ dasd_cancel_req(cqr);
+ /* wait (non-interruptible) for final status */
+ wait_event(generic_waitq,
+ _wait_for_wakeup(cqr));
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
+ }
+
+ maincqr->endclk = get_clock();
+ if ((maincqr->status != DASD_CQR_DONE) &&
+ (maincqr->intrc != -ERESTARTSYS))
+ dasd_log_sense(maincqr, &maincqr->irb);
+ if (maincqr->status == DASD_CQR_DONE)
rc = 0;
- else if (cqr->intrc)
- rc = cqr->intrc;
+ else if (maincqr->intrc)
+ rc = maincqr->intrc;
else
rc = -EIO;
return rc;
}
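
The interruptible path above uses a standard idiom: if the wait is interrupted, the request is cancelled, and the code then waits non-interruptibly for the final status, because the channel program may still be running and the CQR cannot be reused or freed before it reaches a final state. Reduced to just that idiom (taken from the function above):

	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
		cqr->status = DASD_CQR_FAILED;
	}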
/*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+ return _dasd_sleep_on(cqr, 0);
+}
+
+/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptible for its completion.
*/
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
- struct dasd_device *device;
- int rc;
-
- device = cqr->startdev;
- cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
- dasd_add_request_tail(cqr);
- rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
- if (rc == -ERESTARTSYS) {
- dasd_cancel_req(cqr);
- /* wait (non-interruptible) for final status */
- wait_event(generic_waitq, _wait_for_wakeup(cqr));
- cqr->intrc = rc;
- }
-
- if (cqr->status == DASD_CQR_DONE)
- rc = 0;
- else if (cqr->intrc)
- rc = cqr->intrc;
- else
- rc = -EIO;
- return rc;
+ return _dasd_sleep_on(cqr, 1);
}
/*
@@ -1630,7 +1721,7 @@ static void dasd_block_timeout(unsigned long ptr)
block = (struct dasd_block *) ptr;
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
- block->base->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
}
@@ -1657,11 +1748,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
/*
* Process finished error recovery ccw.
*/
-static inline void __dasd_block_process_erp(struct dasd_block *block,
- struct dasd_ccw_req *cqr)
+static void __dasd_process_erp(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
{
dasd_erp_fn_t erp_fn;
- struct dasd_device *device = block->base;
if (cqr->status == DASD_CQR_DONE)
DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
@@ -1725,9 +1815,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
if (!list_empty(&block->ccw_queue))
break;
- spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
- basedev->stopped |= DASD_STOPPED_PENDING;
- spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+ spin_lock_irqsave(
+ get_ccwdev_lock(basedev->cdev), flags);
+ dasd_device_set_stop_bits(basedev,
+ DASD_STOPPED_PENDING);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(basedev->cdev), flags);
dasd_block_set_timer(block, HZ/2);
break;
}
@@ -1813,7 +1906,7 @@ restart:
cqr->status = DASD_CQR_FILLED;
cqr->retries = 255;
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped |= DASD_STOPPED_QUIESCE;
+ dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
flags);
goto restart;
@@ -1821,7 +1914,7 @@ restart:
/* Process finished ERP request. */
if (cqr->refers) {
- __dasd_block_process_erp(block, cqr);
+ __dasd_process_erp(base, cqr);
goto restart;
}
@@ -1952,7 +2045,7 @@ restart_cb:
/* Process finished ERP request. */
if (cqr->refers) {
spin_lock_bh(&block->queue_lock);
- __dasd_block_process_erp(block, cqr);
+ __dasd_process_erp(block->base, cqr);
spin_unlock_bh(&block->queue_lock);
/* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */
@@ -2208,18 +2301,11 @@ int dasd_generic_probe(struct ccw_device *cdev,
{
int ret;
- ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
- if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_generic_probe: could not set ccw-device options "
- "for %s\n", dev_name(&cdev->dev));
- return ret;
- }
ret = dasd_add_sysfs_files(cdev);
if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_generic_probe: could not add sysfs entries "
- "for %s\n", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+ "dasd_generic_probe: could not add "
+ "sysfs entries");
return ret;
}
cdev->handler = &dasd_int_handler;
@@ -2418,16 +2504,16 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
- device->stopped |= DASD_STOPPED_DC_WAIT;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
ret = 1;
break;
case CIO_OPER:
/* FIXME: add a sanity check. */
- device->stopped &= ~DASD_STOPPED_DC_WAIT;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
if (device->stopped & DASD_UNRESUMED_PM) {
- device->stopped &= ~DASD_UNRESUMED_PM;
+ dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
dasd_restore_device(device);
ret = 1;
break;
@@ -2452,7 +2538,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
/* disallow new I/O */
- device->stopped |= DASD_STOPPED_PM;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
/* clear active requests */
INIT_LIST_HEAD(&freeze_queue);
spin_lock_irq(get_ccwdev_lock(cdev));
@@ -2504,14 +2590,18 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
return PTR_ERR(device);
/* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
- device->stopped &= ~DASD_UNRESUMED_PM;
+ dasd_device_remove_stop_bits(device,
+ (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
dasd_schedule_device_bh(device);
- if (device->discipline->restore)
+ /*
+ * call discipline restore function
+ * if the device is stopped, do nothing, e.g. for disconnected devices
+ */
+ if (device->discipline->restore && !(device->stopped))
rc = device->discipline->restore(device);
- if (rc)
+ if (rc || device->stopped)
/*
* if the resume failed for the DASD we put it in
* an UNRESUMED stop state
@@ -2561,8 +2651,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
cqr->startdev = device;
cqr->memdev = device;
cqr->expires = 10*HZ;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 2;
+ cqr->retries = 256;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e8ff7b0c961..44796ba4eb9 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -12,7 +12,6 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/idals.h>
-#include <asm/todclk.h>
#define PRINTK_HEADER "dasd_erp(3990): "
@@ -70,8 +69,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
* processing until the started timer has expired or a related
* interrupt was received.
*/
-static void
-dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
{
struct dasd_device *device = erp->startdev;
@@ -81,10 +79,13 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
"blocking request queue for %is", expires/HZ);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped |= DASD_STOPPED_PENDING;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
erp->status = DASD_CQR_FILLED;
- dasd_block_set_timer(device->block, expires);
+ if (erp->block)
+ dasd_block_set_timer(erp->block, expires);
+ else
+ dasd_device_set_timer(device, expires);
}
/*
@@ -243,9 +244,13 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
* DESCRIPTION
* Setup ERP to do the ERP action 1 (see Reference manual).
* Repeat the operation on a different channel path.
- * If all alternate paths have been tried, the request is posted with a
- * permanent error.
- * Note: duplex handling is not implemented (yet).
+ * As a deviation from the recommended recovery action, we reset the path mask
+ * after we have tried each path and go through all paths a second time.
+ * This will cover situations where only one path at a time is actually down,
+ * but all paths fail and recover just with the same sequence and timing as
+ * we try to use them (flapping links).
+ * If all alternate paths have been tried twice, the request is posted with
+ * a permanent error.
*
* PARAMETER
* erp pointer to the current ERP
@@ -254,17 +259,25 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
* erp pointer to the ERP
*
*/
-static struct dasd_ccw_req *
-dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
{
+ erp->function = dasd_3990_erp_action_1_sec;
+ dasd_3990_erp_alternate_path(erp);
+ return erp;
+}
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
erp->function = dasd_3990_erp_action_1;
-
dasd_3990_erp_alternate_path(erp);
-
+ if (erp->status == DASD_CQR_FAILED) {
+ erp->status = DASD_CQR_FILLED;
+ erp->retries = 10;
+ erp->lpm = LPM_ANYPATH;
+ erp->function = dasd_3990_erp_action_1_sec;
+ }
return erp;
-
-} /* end dasd_3990_erp_action_1 */
+} /* end dasd_3990_erp_action_1(b) */
/*
* DASD_3990_ERP_ACTION_4
@@ -2295,6 +2308,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
return cqr;
}
+ ccw = cqr->cpaddr;
if (cqr->cpmode == 1) {
/* make a shallow copy of the original tcw but set new tsb */
erp->cpmode = 1;
@@ -2303,6 +2317,9 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
tcw->tsb = (long)tsb;
+ } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+ /* PSF cannot be chained from NOOP/TIC */
+ erp->cpaddr = cqr->cpaddr;
} else {
/* initialize request with default TIC to current ERP/CQR */
ccw = erp->cpaddr;
@@ -2487,6 +2504,8 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
erp = dasd_3990_erp_action_1(erp);
+ } else if (erp->function == dasd_3990_erp_action_1_sec) {
+ erp = dasd_3990_erp_action_1_sec(erp);
} else if (erp->function == dasd_3990_erp_action_5) {
/* retries have not been successful */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 70a008c0052..fd1231738ef 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -152,6 +152,7 @@ static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
spin_lock_init(&lcu->lock);
+ init_completion(&lcu->lcu_setup);
return lcu;
out_err4:
@@ -240,6 +241,67 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
/*
+ * The first device to be registered on an LCU will have to do
+ * some additional setup steps to configure that LCU on the
+ * storage server. All further devices should delay their
+ * initialization until the first device is done.
+ * To synchronize this work, the first device will call
+ * dasd_alias_lcu_setup_complete when it is done, and all
+ * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
+ */
+void dasd_alias_lcu_setup_complete(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+ struct alias_server *server;
+ struct alias_lcu *lcu;
+ struct dasd_uid *uid;
+
+ private = (struct dasd_eckd_private *) device->private;
+ uid = &private->uid;
+ lcu = NULL;
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(uid);
+ if (server)
+ lcu = _find_lcu(server, uid);
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ if (!lcu) {
+ DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+ "could not find lcu for %04x %02x",
+ uid->ssid, uid->real_unit_addr);
+ WARN_ON(1);
+ return;
+ }
+ complete_all(&lcu->lcu_setup);
+}
+
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+ struct alias_server *server;
+ struct alias_lcu *lcu;
+ struct dasd_uid *uid;
+
+ private = (struct dasd_eckd_private *) device->private;
+ uid = &private->uid;
+ lcu = NULL;
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(uid);
+ if (server)
+ lcu = _find_lcu(server, uid);
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ if (!lcu) {
+ DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+ "could not find lcu for %04x %02x",
+ uid->ssid, uid->real_unit_addr);
+ WARN_ON(1);
+ return;
+ }
+ wait_for_completion(&lcu->lcu_setup);
+}
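
Both functions resolve the LCU for the device and then rendezvous on the single struct completion added to struct alias_lcu: the first device calls complete_all() when the storage-server setup is finished, and all later devices block in wait_for_completion(). A self-contained sketch of this first-comer-initializes pattern (generic names, not from this file; do_one_time_setup() is hypothetical):

	#include <linux/completion.h>

	struct shared_unit {
		struct completion setup_done;	/* init_completion() at allocation */
	};

	static void unit_attach(struct shared_unit *u, int first)
	{
		if (first) {
			do_one_time_setup();		/* hypothetical setup step */
			complete_all(&u->setup_done);	/* release all waiters, now and later */
		} else {
			wait_for_completion(&u->setup_done);
		}
	}

complete_all() is used instead of complete() so that any number of waiters, whether they arrive before or after the setup finishes, see the LCU as ready.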
+
+/*
* This function removes a device from the scope of alias management.
* The complicated part is to make sure that it is not in use by
* any of the workers. If necessary cancel the work.
@@ -755,11 +817,11 @@ static void __stop_device_on_lcu(struct dasd_device *device,
{
/* If pos == device then device is already locked! */
if (pos == device) {
- pos->stopped |= DASD_STOPPED_SU;
+ dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
- pos->stopped |= DASD_STOPPED_SU;
+ dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(pos->cdev));
}
@@ -793,26 +855,26 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
@@ -836,7 +898,8 @@ static void summary_unit_check_handling_work(struct work_struct *work)
/* 2. reset summary unit check */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
+ dasd_device_remove_stop_bits(device,
+ (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 4e49b4a6c88..f64d0db881b 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,6 @@
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/s390_ext.h>
-#include <asm/todclk.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
@@ -145,6 +144,15 @@ dasd_diag_erp(struct dasd_device *device)
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+ if (rc == 4) {
+ if (!(device->features & DASD_FEATURE_READONLY)) {
+ dev_warn(&device->cdev->dev,
+ "The access mode of a DIAG device changed"
+ " to read-only");
+ device->features |= DASD_FEATURE_READONLY;
+ }
+ rc = 0;
+ }
if (rc)
dev_warn(&device->cdev->dev, "DIAG ERP failed with "
"rc=%d\n", rc);
@@ -433,16 +441,20 @@ dasd_diag_check_device(struct dasd_device *device)
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
- if (rc) {
+ if (rc && (rc != 4)) {
dev_warn(&device->cdev->dev, "DIAG initialization "
"failed with rc=%d\n", rc);
rc = -EIO;
} else {
+ if (rc == 4)
+ device->features |= DASD_FEATURE_READONLY;
dev_info(&device->cdev->dev,
- "New DASD with %ld byte/block, total size %ld KB\n",
+ "New DASD with %ld byte/block, total size %ld KB%s\n",
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
- block->s2b_shift) >> 1);
+ block->s2b_shift) >> 1,
+ (rc == 4) ? ", read-only device" : "");
+ rc = 0;
}
out_label:
free_page((long) label);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 417b97cd3f9..5819dc02a14 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -24,7 +24,6 @@
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
@@ -78,6 +77,11 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
@@ -86,11 +90,12 @@ dasd_eckd_probe (struct ccw_device *cdev)
int ret;
/* set ECKD specific ccw-device options */
- ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+ CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_eckd_probe: could not set ccw-device options "
- "for %s\n", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+ "dasd_eckd_probe: could not set "
+ "ccw-device options");
return ret;
}
ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -749,8 +754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
cqr->block = NULL;
cqr->expires = 10*HZ;
cqr->lpm = lpm;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 2;
+ cqr->retries = 256;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
@@ -885,16 +889,15 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT(DBF_WARNING,
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data returned "
- "error %d for device: %s", rc,
- dev_name(&device->cdev->dev));
+ "error %d", rc);
return rc;
}
if (conf_data == NULL) {
- DBF_EVENT(DBF_WARNING, "No configuration "
- "data retrieved for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
continue; /* no error */
}
/* save first valid configuration data */
@@ -941,16 +944,14 @@ static int dasd_eckd_read_features(struct dasd_device *device)
sizeof(struct dasd_rssd_features)),
device);
if (IS_ERR(cqr)) {
- DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
- "request for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+ "allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 5;
+ cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
@@ -1012,9 +1013,9 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
- psf_ssc_data->suborder = 0x40;
+ psf_ssc_data->suborder = 0xc0;
if (enable_pav) {
- psf_ssc_data->suborder |= 0x88;
+ psf_ssc_data->suborder |= 0x08;
psf_ssc_data->reserved[0] = 0x88;
}
ccw = cqr->cpaddr;
@@ -1025,6 +1026,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
+ cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1057,7 +1059,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
/*
* Validate storage server of current device.
*/
-static int dasd_eckd_validate_server(struct dasd_device *device)
+static void dasd_eckd_validate_server(struct dasd_device *device)
{
int rc;
struct dasd_eckd_private *private;
@@ -1068,15 +1070,12 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
else
enable_pav = 1;
rc = dasd_eckd_psf_ssc(device, enable_pav);
+
/* maybe the requested feature is not available on the server,
* therefore just report the error and go ahead */
private = (struct dasd_eckd_private *) device->private;
- DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
- "returned rc=%d for device: %s",
- private->uid.vendor, private->uid.serial,
- private->uid.ssid, rc, dev_name(&device->cdev->dev));
- /* RE-Read Configuration Data */
- return dasd_eckd_read_conf(device);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+ "returned rc=%d", private->uid.ssid, rc);
}
/*
@@ -1090,6 +1089,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
struct dasd_block *block;
int is_known, rc;
+ if (!ccw_device_is_pathgroup(device->cdev)) {
+ dev_warn(&device->cdev->dev,
+ "A channel path group could not be established\n");
+ return -EIO;
+ }
+ if (!ccw_device_is_multipath(device->cdev)) {
+ dev_info(&device->cdev->dev,
+ "The DASD is not operating in multipath mode\n");
+ }
private = (struct dasd_eckd_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
@@ -1123,9 +1131,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (private->uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT(DBF_WARNING, "could not allocate dasd "
- "block structure for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "could not allocate dasd "
+ "block structure");
rc = PTR_ERR(block);
goto out_err1;
}
@@ -1139,12 +1147,21 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
rc = is_known;
goto out_err2;
}
+ /*
+ * dasd_eckd_validate_server is done on the first device that
+ * is found for an LCU. All other devices have to wait
+ * for it, so they will read the correct feature codes.
+ */
if (!is_known) {
- /* new lcu found */
- rc = dasd_eckd_validate_server(device); /* will switch pav on */
- if (rc)
- goto out_err3;
- }
+ dasd_eckd_validate_server(device);
+ dasd_alias_lcu_setup_complete(device);
+ } else
+ dasd_alias_wait_for_lcu_setup(device);
+
+ /* device may report different configuration data after LCU setup */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err3;
/* Read Feature Codes */
dasd_eckd_read_features(device);
@@ -1153,9 +1170,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
- DBF_EVENT(DBF_WARNING,
- "Read device characteristics failed, rc=%d for "
- "device: %s", rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristics failed, rc=%d", rc);
goto out_err3;
}
/* find the valid cylinder size */
@@ -1256,12 +1272,29 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->block = NULL;
cqr->startdev = device;
cqr->memdev = device;
- cqr->retries = 0;
+ cqr->retries = 255;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+ char *sense;
+ if (init_cqr->status == DASD_CQR_DONE)
+ return INIT_CQR_OK;
+ else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+ init_cqr->status == DASD_CQR_FAILED) {
+ sense = dasd_get_sense(&init_cqr->irb);
+ if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+ return INIT_CQR_UNFORMATTED;
+ else
+ return INIT_CQR_ERROR;
+ } else
+ return INIT_CQR_ERROR;
+}
+
/*
* This is the callback function for the init_analysis cqr. It saves
* the status of the initial analysis ccw before it frees it and kicks
@@ -1269,21 +1302,20 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
* dasd_eckd_do_analysis again (if the devices has not been marked
* for deletion in the meantime).
*/
-static void
-dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+ void *data)
{
struct dasd_eckd_private *private;
struct dasd_device *device;
device = init_cqr->startdev;
private = (struct dasd_eckd_private *) device->private;
- private->init_cqr_status = init_cqr->status;
+ private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
dasd_kick_device(device);
}
-static int
-dasd_eckd_start_analysis(struct dasd_block *block)
+static int dasd_eckd_start_analysis(struct dasd_block *block)
{
struct dasd_eckd_private *private;
struct dasd_ccw_req *init_cqr;
@@ -1295,27 +1327,44 @@ dasd_eckd_start_analysis(struct dasd_block *block)
init_cqr->callback = dasd_eckd_analysis_callback;
init_cqr->callback_data = NULL;
init_cqr->expires = 5*HZ;
+ /* first try without ERP, so we can later handle unformatted
+ * devices as a special case
+ */
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+ init_cqr->retries = 0;
dasd_add_request_head(init_cqr);
return -EAGAIN;
}
-static int
-dasd_eckd_end_analysis(struct dasd_block *block)
+static int dasd_eckd_end_analysis(struct dasd_block *block)
{
struct dasd_device *device;
struct dasd_eckd_private *private;
struct eckd_count *count_area;
unsigned int sb, blk_per_trk;
int status, i;
+ struct dasd_ccw_req *init_cqr;
device = block->base;
private = (struct dasd_eckd_private *) device->private;
status = private->init_cqr_status;
private->init_cqr_status = -1;
- if (status != DASD_CQR_DONE) {
- dev_warn(&device->cdev->dev,
- "The DASD is not formatted\n");
+ if (status == INIT_CQR_ERROR) {
+ /* try again, this time with full ERP */
+ init_cqr = dasd_eckd_analysis_ccw(device);
+ dasd_sleep_on(init_cqr);
+ status = dasd_eckd_analysis_evaluation(init_cqr);
+ dasd_sfree_request(init_cqr, device);
+ }
+
+ if (status == INIT_CQR_UNFORMATTED) {
+ dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
return -EMEDIUMTYPE;
+ } else if (status == INIT_CQR_ERROR) {
+ dev_err(&device->cdev->dev,
+ "Detecting the DASD disk layout failed because "
+ "of an I/O error\n");
+ return -EIO;
}
private->uses_cdl = 1;
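
The disk analysis now runs in two stages: the initial CQR is submitted with ERP disabled (DASD_CQR_FLAGS_USE_ERP cleared, retries = 0) so that an unformatted volume can be recognized cheaply from the 'no record found' sense bit, and only a genuine I/O error triggers a second, synchronous attempt with full error recovery. In outline (all names from this patch):

	status = dasd_eckd_analysis_evaluation(init_cqr);	/* first pass, no ERP */
	if (status == INIT_CQR_ERROR) {
		init_cqr = dasd_eckd_analysis_ccw(device);	/* second pass, ERP on */
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}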
@@ -1607,8 +1656,7 @@ dasd_eckd_format_device(struct dasd_device * device,
}
fcp->startdev = device;
fcp->memdev = device;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
- fcp->retries = 5; /* set retry counter to enable default ERP */
+ fcp->retries = 256;
fcp->buildclk = get_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
@@ -2690,6 +2738,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 0;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
@@ -3240,11 +3289,15 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (is_known < 0)
return is_known;
if (!is_known) {
- /* new lcu found */
- rc = dasd_eckd_validate_server(device); /* will switch pav on */
- if (rc)
- goto out_err;
- }
+ dasd_eckd_validate_server(device);
+ dasd_alias_lcu_setup_complete(device);
+ } else
+ dasd_alias_wait_for_lcu_setup(device);
+
+ /* RE-Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
/* Read Feature Codes */
dasd_eckd_read_features(device);
@@ -3253,9 +3306,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&temp_rdc_data, 64);
if (rc) {
- DBF_EVENT(DBF_WARNING,
- "Read device characteristics failed, rc=%d for "
- "device: %s", rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristics failed, rc=%d", rc);
goto out_err;
}
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad45bcac3ce..864d53c0420 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -414,6 +414,7 @@ struct alias_lcu {
struct summary_unit_check_work_data suc_data;
struct read_uac_work_data ruac_data;
struct dasd_ccw_req *rsu_cqr;
+ struct completion lcu_setup;
};
struct alias_pav_group {
@@ -460,5 +461,6 @@ int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
-
+void dasd_alias_lcu_setup_complete(struct dasd_device *);
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d96039eae59..1f3e967aaba 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -536,7 +536,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
- lock_kernel();
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -544,7 +543,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
- unlock_kernel();
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -552,14 +550,12 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
- unlock_kernel();
return -ENOMEM;
}
if (dasd_eer_allocate_buffer_pages(eerb->buffer,
eerb->buffer_page_count)) {
kfree(eerb->buffer);
kfree(eerb);
- unlock_kernel();
return -ENOMEM;
}
filp->private_data = eerb;
@@ -567,7 +563,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
list_add(&eerb->list, &bufferlist);
spin_unlock_irqrestore(&bufferlock, flags);
- unlock_kernel();
return nonseekable_open(inp,filp);
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f245377e8e2..0f152444ac7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -20,7 +20,6 @@
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/todclk.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
@@ -141,9 +140,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
- "structure for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
+ "dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
@@ -155,9 +153,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
- DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
- "error %d for device: %s",
- rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
+ "characteristics returned error %d", rc);
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8afd9fa0087..e4c2143dabf 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -108,6 +108,16 @@ do { \
d_data); \
} while(0)
+#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
+do { \
+ struct ccw_dev_id __dev_id; \
+ ccw_device_get_id(d_cdev, &__dev_id); \
+ debug_sprintf_event(dasd_debug_area, \
+ d_level, \
+ "0.%x.%04x " d_str "\n", \
+ __dev_id.ssid, __dev_id.devno, d_data); \
+} while (0)
+
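
The new macro looks up the ccw_dev_id itself, so call sites no longer interpolate dev_name() by hand; every message is prefixed with the bus ID in 0.<ssid>.<devno> form. A representative call site from this patch:

	DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
			"Read device characteristics failed, rc=%d", rc);

Because d_data is expanded unconditionally after the format string, the variadic part must not be empty, which is why plain messages in this patch are written as DBF_EVENT_DEVID(level, cdev, "%s", "text").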
#define DBF_EXC(d_level, d_str, d_data...)\
do { \
debug_sprintf_exception(dasd_debug_area, \
@@ -595,6 +605,9 @@ int dasd_generic_restore_device(struct ccw_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
+void dasd_device_set_stop_bits(struct dasd_device *, int);
+void dasd_device_remove_stop_bits(struct dasd_device *, int);
+
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f756a1b0c57..478bcdb90b6 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -101,7 +101,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped |= DASD_STOPPED_QUIESCE;
+ dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
@@ -122,7 +122,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped &= ~DASD_STOPPED_QUIESCE;
+ dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 654daa3cdfd..5f23eca8280 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -215,7 +215,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
}
prof = &dasd_global_profile;
- /* prevent couter 'overflow' on output */
+ /* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 21639d6c996..9d61683b563 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -857,7 +857,6 @@ static struct console con3215 = {
/*
* 3215 console initialization code called from console_init().
- * NOTE: This is called before kmalloc is available.
*/
static int __init con3215_init(void)
{
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb838bdf829..6bca81aea39 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -572,7 +572,6 @@ static struct console con3270 = {
/*
* 3270 console initialization code called from console_init().
- * NOTE: This is called before kmalloc is available.
*/
static int __init
con3270_init(void)
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 097d3846a82..28e4649fa9e 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -38,6 +38,8 @@ struct fs3270 {
size_t rdbuf_size; /* size of data returned by RDBUF */
};
+static DEFINE_MUTEX(fs3270_mutex);
+
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
@@ -74,7 +76,7 @@ fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
}
rc = raw3270_start(view, rq);
if (rc == 0) {
- /* Started sucessfully. Now wait for completion. */
+ /* Started successfully. Now wait for completion. */
wait_event(fp->wait, raw3270_request_final(rq));
}
} while (rc == -EACCES);
@@ -328,7 +330,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fp)
return -ENODEV;
rc = 0;
- lock_kernel();
+ mutex_lock(&fs3270_mutex);
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
@@ -354,7 +356,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
rc = -EFAULT;
break;
}
- unlock_kernel();
+ mutex_unlock(&fs3270_mutex);
return rc;
}
@@ -437,7 +439,7 @@ fs3270_open(struct inode *inode, struct file *filp)
minor = tty->index + RAW3270_FIRSTMINOR;
tty_kref_put(tty);
}
- lock_kernel();
+ mutex_lock(&fs3270_mutex);
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
@@ -478,7 +480,7 @@ fs3270_open(struct inode *inode, struct file *filp)
}
filp->private_data = fp;
out:
- unlock_kernel();
+ mutex_unlock(&fs3270_mutex);
return rc;
}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 66e21dd2315..60473f86e1f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -283,7 +282,6 @@ static int mon_open(struct inode *inode, struct file *filp)
/*
* only one user allowed
*/
- lock_kernel();
rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
goto out;
@@ -321,7 +319,6 @@ static int mon_open(struct inode *inode, struct file *filp)
}
filp->private_data = monpriv;
dev_set_drvdata(monreader_device, monpriv);
- unlock_kernel();
return nonseekable_open(inode, filp);
out_path:
@@ -331,7 +328,6 @@ out_priv:
out_use:
clear_bit(MON_IN_USE, &mon_in_use);
out:
- unlock_kernel();
return rc;
}
@@ -607,6 +603,10 @@ static int __init mon_init(void)
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
rc = misc_register(&mon_dev);
if (rc < 0 )
goto out;
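
The same rule is applied to monwriter.c below: with the big kernel lock removed from open(), registration must be the final step of initialization, since userspace can call open() the moment misc_register() returns. A minimal sketch of the safe ordering (generic driver; example_dev is a hypothetical struct miscdevice):

	static int __init example_init(void)
	{
		int rc;

		rc = prepare_all_driver_state();	/* hypothetical setup */
		if (rc)
			return rc;
		/* last action: file operations become callable immediately */
		return misc_register(&example_dev);
	}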
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 66fb8eba93f..6532ed8b4af 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -13,7 +13,6 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -185,13 +184,11 @@ static int monwrite_open(struct inode *inode, struct file *filp)
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
- lock_kernel();
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
- unlock_kernel();
return nonseekable_open(inode, filp);
}
@@ -364,6 +361,10 @@ static int __init mon_init(void)
goto out_driver;
}
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 5cc11c636d3..28b5afc129c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -84,6 +84,7 @@ static void __init sclp_read_info_early(void)
do {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
+ sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
rc = sclp_cmd_sync_early(commands[i], sccb);
} while (rc == -EBUSY);
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index a2633377470..7a242f07363 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -212,6 +212,9 @@ struct tape_device {
struct tape_class_device * nt;
struct tape_class_device * rt;
+ /* Device mutex to serialize tape commands. */
+ struct mutex mutex;
+
/* Device discipline information. */
struct tape_discipline * discipline;
void * discdata;
@@ -292,9 +295,9 @@ extern int tape_generic_pm_suspend(struct ccw_device *);
extern int tape_generic_probe(struct ccw_device *);
extern void tape_generic_remove(struct ccw_device *);
-extern struct tape_device *tape_get_device(int devindex);
-extern struct tape_device *tape_get_device_reference(struct tape_device *);
-extern struct tape_device *tape_put_device(struct tape_device *);
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2fe45ff77b7..3657fe103c2 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -113,16 +113,16 @@ tape_34xx_work_handler(struct work_struct *work)
{
struct tape_34xx_work *p =
container_of(work, struct tape_34xx_work, work);
+ struct tape_device *device = p->device;
switch(p->op) {
case TO_MSEN:
- tape_34xx_medium_sense(p->device);
+ tape_34xx_medium_sense(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
}
-
- p->device = tape_put_device(p->device);
+ tape_put_device(device);
kfree(p);
}
@@ -136,7 +136,7 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
INIT_WORK(&p->work, tape_34xx_work_handler);
- p->device = tape_get_device_reference(device);
+ p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e4cc3aae916..0c72aadb839 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -608,7 +608,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
INIT_WORK(&p->work, tape_3590_work_handler);
- p->device = tape_get_device_reference(device);
+ p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 0c0705b91c2..4799cc2f73c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -54,7 +54,7 @@ static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .locked_ioctl = tapeblock_ioctl,
+ .ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
@@ -239,7 +239,7 @@ tapeblock_setup_device(struct tape_device * device)
disk->major = tapeblock_major;
disk->first_minor = device->first_minor;
disk->fops = &tapeblock_fops;
- disk->private_data = tape_get_device_reference(device);
+ disk->private_data = tape_get_device(device);
disk->queue = blkdat->request_queue;
set_capacity(disk, 0);
sprintf(disk->disk_name, "btibm%d",
@@ -247,11 +247,11 @@ tapeblock_setup_device(struct tape_device * device)
blkdat->disk = disk;
blkdat->medium_changed = 1;
- blkdat->request_queue->queuedata = tape_get_device_reference(device);
+ blkdat->request_queue->queuedata = tape_get_device(device);
add_disk(disk);
- tape_get_device_reference(device);
+ tape_get_device(device);
INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
return 0;
@@ -274,13 +274,14 @@ tapeblock_cleanup_device(struct tape_device *device)
}
del_gendisk(device->blk_data.disk);
- device->blk_data.disk->private_data =
- tape_put_device(device->blk_data.disk->private_data);
+ device->blk_data.disk->private_data = NULL;
+ tape_put_device(device);
put_disk(device->blk_data.disk);
device->blk_data.disk = NULL;
cleanup_queue:
- device->blk_data.request_queue->queuedata = tape_put_device(device);
+ device->blk_data.request_queue->queuedata = NULL;
+ tape_put_device(device);
blk_cleanup_queue(device->blk_data.request_queue);
device->blk_data.request_queue = NULL;
@@ -363,7 +364,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
struct tape_device * device;
int rc;
- device = tape_get_device_reference(disk->private_data);
+ device = tape_get_device(disk->private_data);
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31566c55adf..23d773a0d11 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -33,8 +33,7 @@ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
-static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
static long tapechar_compat_ioctl(struct file *, unsigned int,
unsigned long);
@@ -43,7 +42,7 @@ static const struct file_operations tape_fops =
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
- .ioctl = tapechar_ioctl,
+ .unlocked_ioctl = tapechar_ioctl,
.compat_ioctl = tapechar_compat_ioctl,
.open = tapechar_open,
.release = tapechar_release,
@@ -170,7 +169,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
- filp->f_pos += rc;
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
@@ -238,7 +236,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
- filp->f_pos += block_size - request->rescnt;
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
@@ -286,26 +283,20 @@ tapechar_open (struct inode *inode, struct file *filp)
if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
return -ENODEV;
- lock_kernel();
minor = iminor(filp->f_path.dentry->d_inode);
- device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
+ device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
- DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
- rc = PTR_ERR(device);
- goto out;
+ DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+ return PTR_ERR(device);
}
-
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- rc = nonseekable_open(inode, filp);
- }
- else
+ nonseekable_open(inode, filp);
+ } else
tape_put_device(device);
-out:
- unlock_kernel();
return rc;
}
@@ -342,7 +333,8 @@ tapechar_release(struct inode *inode, struct file *filp)
device->char_data.idal_buf = NULL;
}
tape_release(device);
- filp->private_data = tape_put_device(device);
+ filp->private_data = NULL;
+ tape_put_device(device);
return 0;
}
@@ -351,16 +343,11 @@ tapechar_release(struct inode *inode, struct file *filp)
* Tape device io controls.
*/
static int
-tapechar_ioctl(struct inode *inp, struct file *filp,
- unsigned int no, unsigned long data)
+__tapechar_ioctl(struct tape_device *device,
+ unsigned int no, unsigned long data)
{
- struct tape_device *device;
int rc;
- DBF_EVENT(6, "TCHAR:ioct\n");
-
- device = (struct tape_device *) filp->private_data;
-
if (no == MTIOCTOP) {
struct mtop op;
@@ -453,15 +440,30 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
}
static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+ struct tape_device *device;
+ long rc;
+
+ DBF_EVENT(6, "TCHAR:ioct\n");
+
+ device = (struct tape_device *) filp->private_data;
+ mutex_lock(&device->mutex);
+ rc = __tapechar_ioctl(device, no, data);
+ mutex_unlock(&device->mutex);
+ return rc;
+}
+
+static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
if (device->discipline->ioctl_fn) {
- lock_kernel();
+ mutex_lock(&device->mutex);
rval = device->discipline->ioctl_fn(device, no, data);
- unlock_kernel();
+ mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
}
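
Both entry points above follow the same big-kernel-lock removal pattern: the BKL-protected .ioctl handler becomes an .unlocked_ioctl handler serialized by the new per-device mutex. The conversion in miniature (generic driver with hypothetical names):

	static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		struct foo_device *dev = filp->private_data;
		long rc;

		mutex_lock(&dev->mutex);	/* was: lock_kernel() */
		rc = __foo_ioctl(dev, cmd, arg);
		mutex_unlock(&dev->mutex);	/* was: unlock_kernel() */
		return rc;
	}

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foo_ioctl,
	};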
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5cd31e07164..f5d6802dc5d 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -492,6 +492,7 @@ tape_alloc_device(void)
kfree(device);
return ERR_PTR(-ENOMEM);
}
+ mutex_init(&device->mutex);
INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq);
@@ -511,11 +512,12 @@ tape_alloc_device(void)
* increment the reference count.
*/
struct tape_device *
-tape_get_device_reference(struct tape_device *device)
+tape_get_device(struct tape_device *device)
{
- DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
- atomic_inc_return(&device->ref_count));
+ int count;
+ count = atomic_inc_return(&device->ref_count);
+ DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
return device;
}
@@ -525,32 +527,25 @@ tape_get_device_reference(struct tape_device *device)
- * The function returns a NULL pointer to be used by the caller
- * for clearing reference pointers.
+ * Callers are responsible for clearing their own reference
+ * pointers.
*/
-struct tape_device *
+void
tape_put_device(struct tape_device *device)
{
- int remain;
+ int count;
- remain = atomic_dec_return(&device->ref_count);
- if (remain > 0) {
- DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
- } else {
- if (remain < 0) {
- DBF_EVENT(4, "put device without reference\n");
- } else {
- DBF_EVENT(4, "tape_free_device(%p)\n", device);
- kfree(device->modeset_byte);
- kfree(device);
- }
+ count = atomic_dec_return(&device->ref_count);
+ DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
+ BUG_ON(count < 0);
+ if (count == 0) {
+ kfree(device->modeset_byte);
+ kfree(device);
}
-
- return NULL;
}
/*
* Find tape device by a device index.
*/
struct tape_device *
-tape_get_device(int devindex)
+tape_find_device(int devindex)
{
struct tape_device *device, *tmp;
@@ -558,7 +553,7 @@ tape_get_device(int devindex)
read_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
- device = tape_get_device_reference(tmp);
+ device = tape_get_device(tmp);
break;
}
}
@@ -579,7 +574,8 @@ tape_generic_probe(struct ccw_device *cdev)
device = tape_alloc_device();
if (IS_ERR(device))
return -ENODEV;
- ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+ ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+ CCWDEV_DO_MULTIPATH);
ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
if (ret) {
tape_put_device(device);
@@ -606,7 +602,8 @@ __tape_discard_requests(struct tape_device *device)
list_del(&request->list);
/* Decrease ref_count for removed request. */
- request->device = tape_put_device(device);
+ request->device = NULL;
+ tape_put_device(device);
request->rc = -EIO;
if (request->callback != NULL)
request->callback(request, request->callback_data);
@@ -664,9 +661,11 @@ tape_generic_remove(struct ccw_device *cdev)
tape_cleanup_device(device);
}
- if (!dev_get_drvdata(&cdev->dev)) {
+ device = dev_get_drvdata(&cdev->dev);
+ if (device) {
sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
- dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
+ dev_set_drvdata(&cdev->dev, NULL);
+ tape_put_device(device);
}
}
@@ -721,9 +720,8 @@ tape_free_request (struct tape_request * request)
{
DBF_LH(6, "Free request %p\n", request);
- if (request->device != NULL) {
- request->device = tape_put_device(request->device);
- }
+ if (request->device)
+ tape_put_device(request->device);
kfree(request->cpdata);
kfree(request->cpaddr);
kfree(request);
@@ -838,7 +836,8 @@ static void tape_long_busy_timeout(unsigned long data)
BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
- device->lb_timeout.data = (unsigned long) tape_put_device(device);
+ device->lb_timeout.data = 0UL;
+ tape_put_device(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
@@ -918,7 +917,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
}
/* Increase use count of device for the added request. */
- request->device = tape_get_device_reference(device);
+ request->device = tape_get_device(device);
if (list_empty(&device->req_queue)) {
/* No other requests are on the queue. Start this one. */
@@ -1117,8 +1116,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
if (del_timer(&device->lb_timeout)) {
- device->lb_timeout.data = (unsigned long)
- tape_put_device(device);
+ device->lb_timeout.data = 0UL;
+ tape_put_device(device);
__tape_start_next_request(device);
}
return;
@@ -1173,7 +1172,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break;
case TAPE_IO_LONG_BUSY:
device->lb_timeout.data =
- (unsigned long)tape_get_device_reference(device);
+ (unsigned long) tape_get_device(device);
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
@@ -1326,7 +1325,7 @@ EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
-EXPORT_SYMBOL(tape_get_device_reference);
+EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
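Two renames in tape_core.c are easy to conflate: tape_get_device_reference() becomes tape_get_device() (take a reference on a device you already hold), while the old tape_get_device() becomes tape_find_device() (look up by minor index and return a referenced device). At the same time tape_put_device() stops returning NULL, so the one-step clear-and-put idiom splits in two; the caller-side change, taken from the hunks above:

    /* Old convention: the put returned NULL so callers cleared in one step. */
    request->device = tape_put_device(device);

    /* New convention: the put returns void; clear the pointer explicitly. */
    request->device = NULL;
    tape_put_device(device);

The new tape_put_device() also turns an unbalanced put from a debug-log entry into BUG_ON(count < 0), which is stricter but catches reference-count bugs where they happen.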
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 202f4213293..ebd820ccfb2 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -45,7 +45,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
seq_printf(m, "TapeNo\tBusID CuType/Model\t"
"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
}
- device = tape_get_device(n);
+ device = tape_find_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 38385677c65..911822db614 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/bootmem.h>
+#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -1731,6 +1732,22 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
return kbd_ioctl(tp->kbd, file, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static long
+tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct tty_operations tty3270_ops = {
.open = tty3270_open,
.close = tty3270_close,
@@ -1745,6 +1762,9 @@ static const struct tty_operations tty3270_ops = {
.hangup = tty3270_hangup,
.wait_until_sent = tty3270_wait_until_sent,
.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tty3270_compat_ioctl,
+#endif
.set_termios = tty3270_set_termios
};
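The new tty3270_compat_ioctl() lets 31-bit user space reach the keyboard ioctls on a 64-bit kernel; the only translation required is compat_ptr(), which widens the 31-bit pointer argument into a proper 64-bit user pointer before the native handler reuses it. The same pattern in generic form (names hypothetical; linux/compat.h is included by the hunk above):

    #ifdef CONFIG_COMPAT
    static long my_compat_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
    {
            /* Widen the 31-bit user pointer, then reuse the native path. */
            return my_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
    }
    #endif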
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d1a142fa3eb..899aa795bf3 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -312,11 +312,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
return -ENOSYS;
/* Be sure this device hasn't already been opened */
- lock_kernel();
spin_lock_bh(&logptr->priv_lock);
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
- unlock_kernel();
return -EBUSY;
}
logptr->dev_in_use = 1;
@@ -360,9 +358,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
|| (logptr->iucv_path_severed));
if (logptr->iucv_path_severed)
goto out_record;
- ret = nonseekable_open(inode, filp);
- unlock_kernel();
- return ret;
+ nonseekable_open(inode, filp);
+ return 0;
out_record:
if (logptr->autorecording)
@@ -372,7 +369,6 @@ out_path:
logptr->path = NULL;
out_dev:
logptr->dev_in_use = 0;
- unlock_kernel();
return -EIO;
}
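vmlogrdr_open() can drop both the BKL (its spinlock-protected dev_in_use flag already provides single-open semantics) and the return-code plumbing around nonseekable_open(), because that helper cannot fail. For reference, its contemporary implementation in fs/open.c is just:

    int nonseekable_open(struct inode *inode, struct file *filp)
    {
            filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
            return 0;
    }

so calling it for its side effect and returning 0 directly, as the hunk does, is equivalent.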
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 77571b68539..cc56fc708ba 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -695,7 +695,6 @@ static int ur_open(struct inode *inode, struct file *file)
if (accmode == O_RDWR)
return -EACCES;
- lock_kernel();
/*
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
@@ -749,7 +748,6 @@ static int ur_open(struct inode *inode, struct file *file)
goto fail_urfile_free;
urf->file_reclen = rc;
file->private_data = urf;
- unlock_kernel();
return 0;
fail_urfile_free:
@@ -761,7 +759,6 @@ fail_unlock:
fail_put:
urdev_put(urd);
out:
- unlock_kernel();
return rc;
}
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index f2bc287b69e..c974058e48d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/suspend.h>
#include <linux/watchdog.h>
-#include <linux/smp_lock.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -49,6 +48,8 @@ static unsigned int vmwdt_interval = 60;
static unsigned long vmwdt_is_open;
static int vmwdt_expect_close;
+static DEFINE_MUTEX(vmwdt_mutex);
+
#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
#define VMWDT_RUNNING 1 /* The watchdog is armed */
@@ -133,15 +134,11 @@ static int __init vmwdt_probe(void)
static int vmwdt_open(struct inode *i, struct file *f)
{
int ret;
- lock_kernel();
- if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
- unlock_kernel();
+ if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
return -EBUSY;
- }
ret = vmwdt_keepalive();
if (ret)
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- unlock_kernel();
return ret ? ret : nonseekable_open(i, f);
}
@@ -160,8 +157,7 @@ static struct watchdog_info vmwdt_info = {
.identity = "z/VM Watchdog Timer",
};
-static int vmwdt_ioctl(struct inode *i, struct file *f,
- unsigned int cmd, unsigned long arg)
+static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -205,10 +201,19 @@ static int vmwdt_ioctl(struct inode *i, struct file *f,
case WDIOC_KEEPALIVE:
return vmwdt_keepalive();
}
-
return -EINVAL;
}
+static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int rc;
+
+ mutex_lock(&vmwdt_mutex);
+ rc = __vmwdt_ioctl(cmd, arg);
+ mutex_unlock(&vmwdt_mutex);
+ return (long) rc;
+}
+
static ssize_t vmwdt_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -288,7 +293,7 @@ static struct notifier_block vmwdt_power_notifier = {
static const struct file_operations vmwdt_fops = {
.open = &vmwdt_open,
.release = &vmwdt_close,
- .ioctl = &vmwdt_ioctl,
+ .unlocked_ioctl = &vmwdt_ioctl,
.write = &vmwdt_write,
.owner = THIS_MODULE,
};
@@ -309,6 +314,10 @@ static int __init vmwdt_init(void)
ret = register_pm_notifier(&vmwdt_power_notifier);
if (ret)
return ret;
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
ret = misc_register(&vmwdt_dev);
if (ret) {
unregister_pm_notifier(&vmwdt_power_notifier);
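vmwatchdog gets the same ioctl conversion as tape_char.c, here with a file-scope DEFINE_MUTEX since there is only one watchdog instance. The comment added to vmwdt_init() states a rule worth generalizing: misc_register() must come last, because the device node is usable the moment it returns and open() may then race with any remaining setup. A sketch of that ordering (helper names hypothetical):

    static int __init my_init(void)
    {
            int ret;

            ret = set_up_state_used_by_fops();      /* everything open() needs */
            if (ret)
                    return ret;
            /* Device node goes live here; open() can run immediately after. */
            ret = misc_register(&my_miscdev);
            if (ret)
                    tear_down_state();
            return ret;
    }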
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65..d033414f759 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
- fcx.o itcw.o crw.o
+ fcx.o itcw.o crw.o ccwreq.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 00000000000..9509e386093
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+ while (lpm && ((lpm & mask) == 0))
+ lpm >>= 1;
+ return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+
+ return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->done)
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ sch->lpm = sch->schib.pmcw.pam;
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw1 *cp = req->cp;
+ int rc = -EACCES;
+
+ while (req->mask) {
+ if (req->retries-- == 0) {
+ /* Retries exhausted, try next path. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Perform start function. */
+ sch->lpm = 0xff;
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+ ccw_device_set_timeout(cdev, req->timeout);
+ return;
+ }
+ if (rc == -ENODEV) {
+ /* Permanent device error. */
+ break;
+ }
+ if (rc == -EACCES) {
+ /* Permanent path error. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Temporary improper status. */
+ rc = cio_clear(sch);
+ if (rc)
+ break;
+ return;
+ }
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->private->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask, req->lpm);
+ req->drc = 0;
+ req->done = 0;
+ req->cancel = 0;
+ if (!req->mask)
+ goto out_nopath;
+ ccwreq_do(cdev);
+ return;
+
+out_nopath:
+ ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->private->req. Return non-zero
+ * if the request has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (req->done)
+ return 1;
+ req->cancel = 1;
+ rc = cio_clear(sch);
+ if (rc)
+ ccwreq_stop(cdev, rc);
+ return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+ struct irb *irb = &cdev->private->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+
+ /* Perform BASIC SENSE if needed. */
+ if (ccw_device_accumulate_and_sense(cdev, lcirb))
+ return IO_RUNNING;
+ /* Check for halt/clear interrupt. */
+ if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ return IO_KILLED;
+ /* Check for path error. */
+ if (scsw->cc == 3 || scsw->pno)
+ return IO_PATH_ERROR;
+ /* Handle BASIC SENSE data. */
+ if (irb->esw.esw0.erw.cons) {
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+ CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+ /* Assume that unexpected SENSE data implies an error. */
+ return IO_STATUS_ERROR;
+ }
+ /* Check for channel errors. */
+ if (scsw->cstat != 0)
+ return IO_STATUS_ERROR;
+ /* Check for device errors. */
+ if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return IO_STATUS_ERROR;
+ /* Check for final state. */
+ if (!(scsw->dstat & DEV_STAT_DEV_END))
+ return IO_RUNNING;
+ /* Check for other improper status. */
+ if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+ return IO_STATUS_ERROR;
+ return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct {
+ struct ccw_dev_id dev_id;
+ u16 retries;
+ u8 lpm;
+ u8 status;
+ } __attribute__ ((packed)) data;
+ data.dev_id = cdev->private->dev_id;
+ data.retries = req->retries;
+ data.lpm = (u8) req->mask;
+ data.status = (u8) status;
+ CIO_TRACE_EVENT(2, "reqstat");
+ CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct irb *irb = (struct irb *) __LC_IRB;
+ enum io_status status;
+ int rc = -EOPNOTSUPP;
+
+ /* Check status of I/O request. */
+ status = ccwreq_status(cdev, irb);
+ if (req->filter)
+ status = req->filter(cdev, req->data, irb, status);
+ if (status != IO_RUNNING)
+ ccw_device_set_timeout(cdev, 0);
+ if (status != IO_DONE && status != IO_RUNNING)
+ ccwreq_log_status(cdev, status);
+ switch (status) {
+ case IO_DONE:
+ break;
+ case IO_RUNNING:
+ return;
+ case IO_REJECTED:
+ goto err;
+ case IO_PATH_ERROR:
+ goto out_next_path;
+ case IO_STATUS_ERROR:
+ goto out_restart;
+ case IO_KILLED:
+ /* Check if request was cancelled on purpose. */
+ if (req->cancel) {
+ rc = -EIO;
+ goto err;
+ }
+ goto out_restart;
+ }
+ /* Check back with request initiator. */
+ if (!req->check)
+ goto out;
+ switch (req->check(cdev, req->data)) {
+ case 0:
+ break;
+ case -EAGAIN:
+ goto out_restart;
+ case -EACCES:
+ goto out_next_path;
+ default:
+ goto err;
+ }
+out:
+ ccwreq_stop(cdev, 0);
+ return;
+
+out_next_path:
+ /* Try next path and restart I/O. */
+ if (!ccwreq_next_path(cdev)) {
+ rc = -EACCES;
+ goto err;
+ }
+out_restart:
+ /* Restart. */
+ ccwreq_do(cdev);
+ return;
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (!ccwreq_next_path(cdev)) {
+ /* set the final return code for this request */
+ req->drc = -ETIME;
+ }
+ rc = cio_clear(sch);
+ if (rc)
+ goto err;
+ return;
+
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper notification during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+ ccwreq_stop(cdev, -ENODEV);
+}
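ccwreq.c is the core of the series: a caller fills in cdev->private->req, and the infrastructure walks paths and retries until the request succeeds or fails terminally, reporting the result through req->callback. The "try all paths twice" trick works because req->mask starts as the 16-bit value 0x8080, two copies of the path pattern, while cio_start() only receives the low byte; ccwreq_next_path() shifts the whole mask right until it overlaps req->lpm again, so the high-byte copy reappears in the low byte for a second pass once the first copy is used up. A hypothetical caller, assuming the ccw_request fields referenced above (cp, timeout, maxretries, lpm, callback) are declared as used here; this is a sketch, not code from the patch:

    static void my_request_done(struct ccw_device *cdev, void *data, int rc)
    {
            /* rc is 0 on success, else the final error from ccwreq_stop(). */
    }

    static void my_start_request(struct ccw_device *cdev, struct ccw1 *cp)
    {
            struct ccw_request *req = &cdev->private->req;

            memset(req, 0, sizeof(*req));
            req->cp         = cp;           /* channel program to start */
            req->timeout    = 10 * HZ;      /* per-attempt timeout */
            req->maxretries = 5;            /* attempts per path */
            req->lpm        = 0xff;         /* allow all logical paths */
            req->callback   = my_request_done;
            ccw_request_start(cdev);        /* completion via my_request_done */
    }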
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 8ab51608da5..c268a2e5b7c 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -65,7 +65,7 @@ static void set_chp_logically_online(struct chp_id chpid, int onoff)
chpid_to_chp(chpid)->state = onoff;
}
-/* On succes return 0 if channel-path is varied offline, 1 if it is varied
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 138124fcfca..126f240715a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -618,6 +618,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
s390_idle_check();
irq_enter();
+ __get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704..bf7f80f5a33 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+enum sch_todo {
+ SCH_TODO_NOTHING,
+ SCH_TODO_UNREG,
+};
+
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
struct device dev; /* entry in device tree */
struct css_driver *driver;
void *private; /* private per subchannel type data */
- struct work_struct work;
+ enum sch_todo todo;
+ struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 30f51611130..2985eb43948 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -462,7 +462,7 @@ static struct cmb_area cmb_area = {
* block of memory, which can not be moved as long as any channel
* is active. Therefore, a maximum number of subchannels needs to
* be defined somewhere. This is a module parameter, defaulting to
- * a resonable value of 1024, or 32 kb of memory.
+ * a reasonable value of 1024, or 32 kb of memory.
* Current kernels don't allow kmalloc with more than 128kb, so the
* maximum is 4096.
*/
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa8..92ff88ac110 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
return rc;
}
+static void css_sch_todo(struct work_struct *work);
+
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
kfree(sch);
return ERR_PTR(ret);
}
+ INIT_WORK(&sch->todo_work, css_sch_todo);
return sch;
}
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ if (todo == SCH_TODO_UNREG)
+ css_sch_device_unregister(sch);
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(slow_path_wq, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -376,8 +424,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
/* Unusable - ignore. */
return 0;
}
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
- "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+ CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+ schid.sch_no);
return css_probe_device(schid);
}
@@ -394,6 +442,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
+ if (ret != 0 && ret != -EAGAIN) {
+ CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ }
return ret;
}
@@ -684,6 +736,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+ mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
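css_sched_sch_todo() establishes the pattern the rest of the series reuses for ccw devices: coalesce pending operations into a single ranked todo per object, and pin the object with a device reference for as long as a work item is queued. Because the function returns early when sch->todo already holds an equal or higher value, the enum sch_todo values must be declared in ascending priority order. The reference choreography, condensed from the code above into its two halves:

    /* Scheduling side: one reference per queued work item. */
    if (!get_device(&sch->dev))             /* object already being torn down */
            return;
    sch->todo = todo;
    if (!queue_work(slow_path_wq, &sch->todo_work))
            put_device(&sch->dev);          /* work already queued: drop ref */

    /* Worker side: consume the todo under the lock, act, then unpin. */
    spin_lock_irq(sch->lock);
    todo = sch->todo;
    sch->todo = SCH_TODO_NOTHING;
    spin_unlock_irq(sch->lock);
    /* ... perform the operation ... */
    put_device(&sch->dev);                  /* release the queue's reference */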
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151..fe84b92cde6 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "cio.h"
+
/*
* path grouping stuff
*/
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
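The device.c rework below retires the per-purpose kick_work items (PREPARE_WORK plus explicit reference juggling) in favor of the same todo mechanism, via ccw_device_sched_todo(), and collapses the old orphanage/attach helpers into ccw_device_move_to_sch() and ccw_device_move_to_orph(). The pivot of the new event handling is sch_get_action(); as an aid to reading the large io_subchannel_sch_event() hunk, its decision table paraphrased (derived from the code below, not added to it):

    /*
     *   subchannel not operational, no cdev        -> IO_SCH_UNREG
     *   not operational, driver gives device up    -> IO_SCH_UNREG
     *   not operational, driver keeps device       -> IO_SCH_ORPH_UNREG
     *   operational, no cdev attached              -> IO_SCH_ATTACH
     *   device number changed, driver gives up     -> IO_SCH_UNREG_ATTACH
     *   device number changed, driver keeps device -> IO_SCH_ORPH_ATTACH
     *   no usable path, driver gives device up     -> IO_SCH_UNREG
     *   no usable path, driver keeps device        -> IO_SCH_DISC
     *   device disconnected                        -> IO_SCH_REPROBE
     *   device online                              -> IO_SCH_VERIFY
     *   anything else                              -> IO_SCH_NOP
     */

where "driver gives up" means ccw_device_notify() returned zero for the CIO_GONE or CIO_NO_PATH event.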
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 2490b741e16..9fecfb4223a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -299,53 +303,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
- if (test_and_clear_bit(1, &cdev->private->registered)) {
+ if (device_is_registered(&cdev->dev)) {
+ /* Undo device_add(). */
device_del(&cdev->dev);
+ }
+ if (cdev->private->flags.initialized) {
+ cdev->private->flags.initialized = 0;
/* Release reference from device_initialize(). */
put_device(&cdev->dev);
}
}
-static void ccw_device_remove_orphan_cb(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- ccw_device_unregister(cdev);
- /* Release cdev reference for workqueue processing. */
- put_device(&cdev->dev);
-}
-
-static void
-ccw_device_remove_disconnected(struct ccw_device *cdev)
-{
- unsigned long flags;
-
- /*
- * Forced offline in disconnected state means
- * 'throw away device'.
- */
- if (ccw_device_is_orphan(cdev)) {
- /*
- * Deregister ccw device.
- * Unfortunately, we cannot do this directly from the
- * attribute method.
- */
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
- spin_lock_irqsave(cdev->ccwlock, flags);
- cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_remove_orphan_cb);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
- /* Deregister subchannel, which will kill the ccw device. */
- ccw_device_schedule_sch_unregister(cdev);
-}
+static void io_subchannel_quiesce(struct subchannel *);
/**
* ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +329,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
*/
int ccw_device_set_offline(struct ccw_device *cdev)
{
- int ret;
+ struct subchannel *sch;
+ int ret, state;
if (!cdev)
return -ENODEV;
@@ -374,6 +344,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
+ sch = to_subchannel(cdev->dev.parent);
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +353,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
- ret = ccw_device_offline(cdev);
- if (ret)
- goto error;
+ do {
+ ret = ccw_device_offline(cdev);
+ if (!ret)
+ break;
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+ "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret != -EBUSY)
+ goto error;
+ state = cdev->private->state;
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->state = state;
+ } while (ret == -EBUSY);
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Inform the user if set offline failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: The device entered boxed state while "
+ "being set offline\n", dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: The device stopped operating while "
+ "being set offline\n", dev_name(&cdev->dev));
+ }
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return 0;
error:
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
spin_unlock_irq(cdev->ccwlock);
@@ -448,6 +436,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
spin_unlock_irq(cdev->ccwlock);
+ /* Inform the user that set online failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is boxed\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is not operational\n",
+ dev_name(&cdev->dev));
+ }
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
@@ -494,27 +492,22 @@ error:
static int online_store_handle_offline(struct ccw_device *cdev)
{
- if (cdev->private->state == DEV_STATE_DISCONNECTED)
- ccw_device_remove_disconnected(cdev);
- else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+ spin_unlock_irq(cdev->ccwlock);
+ } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
return 0;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
{
- int ret;
-
/* Do device recognition, if needed. */
if (cdev->private->state == DEV_STATE_BOXED) {
- ret = ccw_device_recognition(cdev);
- if (ret) {
- CIO_MSG_EVENT(0, "Couldn't start recognition "
- "for device 0.%x.%04x (ret=%d)\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- return ret;
- }
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q,
cdev->private->flags.recog_done);
if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +546,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if ((cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_ONLINE &&
- cdev->private->state != DEV_STATE_BOXED &&
- cdev->private->state != DEV_STATE_DISCONNECTED) ||
- atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED)
+ return -EAGAIN;
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -665,81 +657,31 @@ static int ccw_device_register(struct ccw_device *cdev)
cdev->private->dev_id.devno);
if (ret)
return ret;
- ret = device_add(dev);
- if (ret)
- return ret;
-
- set_bit(1, &cdev->private->registered);
- return ret;
+ return device_add(dev);
}
-struct match_data {
- struct ccw_dev_id dev_id;
- struct ccw_device * sibling;
-};
-
-static int
-match_devno(struct device * dev, void * data)
-{
- struct match_data * d = data;
- struct ccw_device * cdev;
-
- cdev = to_ccwdev(dev);
- if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
- !ccw_device_is_orphan(cdev) &&
- ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
- (cdev != d->sibling))
- return 1;
- return 0;
-}
-
-static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
- struct ccw_device *sibling)
-{
- struct device *dev;
- struct match_data data;
-
- data.dev_id = *dev_id;
- data.sibling = sibling;
- dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
-
- return dev ? to_ccwdev(dev) : NULL;
-}
-
-static int match_orphan(struct device *dev, void *data)
+static int match_dev_id(struct device *dev, void *data)
{
- struct ccw_dev_id *dev_id;
- struct ccw_device *cdev;
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *dev_id = data;
- dev_id = data;
- cdev = to_ccwdev(dev);
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
-static struct ccw_device *
-get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
- struct ccw_dev_id *dev_id)
+static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
struct device *dev;
- dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
- match_orphan);
+ dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
return dev ? to_ccwdev(dev) : NULL;
}
-void ccw_device_do_unbind_bind(struct work_struct *work)
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct subchannel *sch;
int ret;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
-
- if (test_bit(1, &cdev->private->registered)) {
+ if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
@@ -773,6 +715,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
return ERR_PTR(-ENOMEM);
}
+static void ccw_device_todo(struct work_struct *work);
+
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
@@ -780,7 +724,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
atomic_set(&cdev->private->onoff, 0);
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
- INIT_WORK(&cdev->private->kick_work, NULL);
+ INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
@@ -789,6 +733,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
put_device(&cdev->dev);
return -ENODEV;
}
+ cdev->private->flags.initialized = 1;
return 0;
}
@@ -806,76 +751,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
return cdev;
}
-static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
-
-static void sch_attach_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- css_update_ssd_info(sch);
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- cdev->private->schid = sch->schid;
- cdev->ccwlock = sch->lock;
- ccw_device_trigger_reprobe(cdev);
- spin_unlock_irq(sch->lock);
-}
-
-static void sch_attach_disconnected_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- struct subchannel *other_sch;
- int ret;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- other_sch = to_subchannel(cdev->dev.parent);
- /* Note: device_move() changes cdev->dev.parent */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_set_cdev(other_sch, NULL);
- /* No need to keep a subchannel without ccw device around. */
- css_sch_device_unregister(other_sch);
- sch_attach_device(sch, cdev);
- /* Put reference for old parent. */
- put_device(&other_sch->dev);
-}
-
-static void sch_attach_orphaned_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- int ret;
- struct subchannel *pseudo_sch;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- pseudo_sch = to_subchannel(cdev->dev.parent);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
- "failed (ret=%d)!\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_attach_device(sch, cdev);
- /* Put reference on pseudo subchannel. */
- put_device(&pseudo_sch->dev);
-}
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
@@ -888,100 +764,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
css_sch_device_unregister(sch);
return;
}
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- spin_unlock_irq(sch->lock);
/* Start recognition for the new ccw device. */
- if (io_subchannel_recog(cdev, sch)) {
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, NULL);
- spin_unlock_irq(sch->lock);
- css_sch_device_unregister(sch);
- /* Put reference from io_subchannel_create_ccwdev(). */
- put_device(&sch->dev);
- /* Give up initial reference. */
- put_device(&cdev->dev);
- }
-}
-
-
-void ccw_device_move_to_orphanage(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct ccw_device *replacing_cdev;
- struct subchannel *sch;
- int ret;
- struct channel_subsystem *css;
- struct ccw_dev_id dev_id;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
- css = to_css(sch->dev.parent);
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
-
- /* Increase refcount for pseudo subchannel. */
- get_device(&css->pseudo_subchannel->dev);
- /*
- * Move the orphaned ccw device to the orphanage so the replacing
- * ccw device can take its place on the subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
- DPM_ORDER_NONE);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Decrease refcount for pseudo subchannel again. */
- put_device(&css->pseudo_subchannel->dev);
- return;
- }
- cdev->ccwlock = css->pseudo_subchannel->lock;
- /*
- * Search for the replacing ccw device
- * - among the disconnected devices
- * - in the orphanage
- */
- replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
- if (replacing_cdev) {
- sch_attach_disconnected_device(sch, replacing_cdev);
- /* Release reference from get_disc_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
- if (replacing_cdev) {
- sch_attach_orphaned_device(sch, replacing_cdev);
- /* Release reference from get_orphaned_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- sch_create_and_recog_new_device(sch);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
+ io_subchannel_recog(cdev, sch);
}
/*
* Register recognized device.
*/
-static void
-io_subchannel_register(struct work_struct *work)
+static void io_subchannel_register(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
int ret;
unsigned long flags;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/*
* Check if subchannel is still registered. It may have become
@@ -1033,41 +828,23 @@ out:
cdev->private->flags.recog_done = 1;
wake_up(&cdev->private->wait_q);
out_err:
- /* Release reference for workqueue processing. */
- put_device(&cdev->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
/* Get subchannel reference for local processing. */
if (!get_device(cdev->dev.parent))
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
- /* Release cdev reference for workqueue processing.*/
- put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
put_device(&sch->dev);
}
-void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
-{
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1083,7 +860,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
- ccw_device_schedule_sch_unregister(cdev);
+ /* Remove device found not operational. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
@@ -1092,22 +870,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
* We can't register the device in interrupt context so
* we schedule a work item.
*/
- if (!get_device(&cdev->dev))
- break;
- PREPARE_WORK(&cdev->private->kick_work,
- io_subchannel_register);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
break;
}
}
-static int
-io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
- int rc;
struct ccw_device_private *priv;
- sch_set_cdev(sch, cdev);
cdev->ccwlock = sch->lock;
/* Init private data. */
@@ -1125,62 +896,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
- rc = ccw_device_recognition(cdev);
+ sch_set_cdev(sch, cdev);
+ ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
- if (rc) {
- if (atomic_dec_and_test(&ccw_device_init_count))
- wake_up(&ccw_device_init_wq);
- }
- return rc;
}
-static void ccw_device_move_to_sch(struct work_struct *work)
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+ struct subchannel *sch)
{
- struct ccw_device_private *priv;
- int rc;
- struct subchannel *sch;
- struct ccw_device *cdev;
- struct subchannel *former_parent;
+ struct subchannel *old_sch;
+ int rc, old_enabled = 0;
- priv = container_of(work, struct ccw_device_private, kick_work);
- sch = priv->sch;
- cdev = priv->cdev;
- former_parent = to_subchannel(cdev->dev.parent);
- /* Get reference for new parent. */
+ old_sch = to_subchannel(cdev->dev.parent);
+ /* Obtain child reference for new parent. */
if (!get_device(&sch->dev))
- return;
+ return -ENODEV;
+
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ old_enabled = old_sch->schib.pmcw.ena;
+ rc = 0;
+ if (old_enabled)
+ rc = cio_disable_subchannel(old_sch);
+ spin_unlock_irq(old_sch->lock);
+ if (rc == -EBUSY) {
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ }
+
mutex_lock(&sch->reg_mutex);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
- "0.%x.%04x failed (ret=%d)!\n",
+ CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, rc);
- css_sch_device_unregister(sch);
- /* Put reference for new parent again. */
+ sch->schib.pmcw.dev, rc);
+ if (old_enabled) {
+ /* Try to reenable the old subchannel. */
+ spin_lock_irq(old_sch->lock);
+ cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+ spin_unlock_irq(old_sch->lock);
+ }
+ /* Release child reference for new parent. */
put_device(&sch->dev);
- goto out;
+ return rc;
}
- if (!sch_is_pseudo_sch(former_parent)) {
- spin_lock_irq(former_parent->lock);
- sch_set_cdev(former_parent, NULL);
- spin_unlock_irq(former_parent->lock);
- css_sch_device_unregister(former_parent);
- /* Reset intparm to zeroes. */
- former_parent->config.intparm = 0;
- cio_commit_config(former_parent);
+ /* Clean up old subchannel. */
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ sch_set_cdev(old_sch, NULL);
+ spin_unlock_irq(old_sch->lock);
+ css_schedule_eval(old_sch->schid);
}
- sch_attach_device(sch, cdev);
-out:
- /* Put reference for old parent. */
- put_device(&former_parent->dev);
- put_device(&cdev->dev);
+ /* Release child reference for old parent. */
+ put_device(&old_sch->dev);
+ /* Initialize new subchannel. */
+ spin_lock_irq(sch->lock);
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ if (!sch_is_pseudo_sch(sch))
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ if (!sch_is_pseudo_sch(sch))
+ css_update_ssd_info(sch);
+ return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_subsystem *css = to_css(sch->dev.parent);
+
+ return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +989,6 @@ void io_subchannel_init_config(struct subchannel *sch)
{
memset(&sch->config, 0, sizeof(sch->config));
sch->config.csense = 1;
- /* Use subchannel mp mode when there is more than 1 installed CHPID. */
- if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
- sch->config.mp = 1;
}
static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1009,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
io_subchannel_init_config(sch);
}
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
- struct subchannel *sch;
-
- sch = container_of(work, struct subchannel, work);
- css_sch_device_unregister(sch);
- put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
- get_device(&sch->dev);
- INIT_WORK(&sch->work, io_subchannel_do_unreg);
- queue_work(slow_path_wq, &sch->work);
-}
-
/*
* Note: We always return 0 so that we bind to the device even on error.
* This is needed so that our remove function is called on unregister.
@@ -1247,8 +1017,6 @@ static int io_subchannel_probe(struct subchannel *sch)
{
struct ccw_device *cdev;
int rc;
- unsigned long flags;
- struct ccw_dev_id dev_id;
if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
@@ -1268,6 +1036,7 @@ static int io_subchannel_probe(struct subchannel *sch)
cdev = sch_get_cdev(sch);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
+ cdev->private->flags.initialized = 1;
ccw_device_register(cdev);
/*
* Check if the device is already online. If it is
@@ -1292,44 +1061,14 @@ static int io_subchannel_probe(struct subchannel *sch)
sch->private = kzalloc(sizeof(struct io_subchannel_private),
GFP_KERNEL | GFP_DMA);
if (!sch->private)
- goto out_err;
- /*
- * First check if a fitting device may be found amongst the
- * disconnected devices or in the orphanage.
- */
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
- cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
- if (!cdev)
- cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
- &dev_id);
- if (cdev) {
- /*
- * Schedule moving the device until when we have a registered
- * subchannel to move to and succeed the probe. We can
- * unregister later again, when the probe is through.
- */
- cdev->private->sch = sch;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_sch);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- return 0;
- }
- cdev = io_subchannel_create_ccwdev(sch);
- if (IS_ERR(cdev))
- goto out_err;
- rc = io_subchannel_recog(cdev, sch);
- if (rc) {
- spin_lock_irqsave(sch->lock, flags);
- io_subchannel_recog_done(cdev);
- spin_unlock_irqrestore(sch->lock, flags);
- }
+ goto out_schedule;
+ css_schedule_eval(sch->schid);
return 0;
-out_err:
- kfree(sch->private);
- sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+
out_schedule:
- io_subchannel_schedule_removal(sch);
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ spin_unlock_irq(sch->lock);
return 0;
}
@@ -1337,32 +1076,23 @@ static int
io_subchannel_remove (struct subchannel *sch)
{
struct ccw_device *cdev;
- unsigned long flags;
cdev = sch_get_cdev(sch);
if (!cdev)
- return 0;
+ goto out_free;
+ io_subchannel_quiesce(sch);
/* Set ccw device to not operational and drop reference. */
- spin_lock_irqsave(cdev->ccwlock, flags);
+ spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
+out_free:
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
-static int io_subchannel_notify(struct subchannel *sch, int event)
-{
- struct ccw_device *cdev;
-
- cdev = sch_get_cdev(sch);
- if (!cdev)
- return 0;
- return ccw_device_notify(cdev, event);
-}
-
static void io_subchannel_verify(struct subchannel *sch)
{
struct ccw_device *cdev;
@@ -1372,36 +1102,6 @@ static void io_subchannel_verify(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
- if (cio_update_schib(sch))
- return 0;
- if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
- return 1;
- return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- if (cio_clear(sch)) {
- /* Recheck device in case clear failed. */
- sch->lpm = 0;
- if (cdev->online)
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- else
- css_schedule_eval(sch->schid);
- return;
- }
- cdev->private->state = DEV_STATE_CLEAR_VERIFY;
- /* Request retry of internal operation. */
- cdev->private->flags.intretry = 1;
- /* Call handler. */
- if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
-}
-
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
struct ccw_device *cdev;
@@ -1409,18 +1109,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
cdev = sch_get_cdev(sch);
if (!cdev)
return;
- if (check_for_io_on_path(sch, mask)) {
- if (cdev->private->state == DEV_STATE_ONLINE)
- ccw_device_kill_io(cdev);
- else {
- terminate_internal_io(sch, cdev);
- /* Re-start path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- }
- } else
- /* trigger path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ if (cio_update_schib(sch))
+ goto err;
+ /* Check for I/O on path. */
+ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+ goto out;
+ if (cdev->private->state == DEV_STATE_ONLINE) {
+ ccw_device_kill_io(cdev);
+ goto out;
+ }
+ if (cio_clear(sch))
+ goto err;
+out:
+ /* Trigger path verification. */
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return;
+err:
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1457,46 +1163,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
return 0;
}
-static void
-io_subchannel_shutdown(struct subchannel *sch)
+static void io_subchannel_quiesce(struct subchannel *sch)
{
struct ccw_device *cdev;
int ret;
+ spin_lock_irq(sch->lock);
cdev = sch_get_cdev(sch);
-
if (cio_is_console(sch->schid))
- return;
+ goto out_unlock;
if (!sch->schib.pmcw.ena)
- /* Nothing to do. */
- return;
+ goto out_unlock;
ret = cio_disable_subchannel(sch);
if (ret != -EBUSY)
- /* Subchannel is disabled, we're done. */
- return;
- cdev->private->state = DEV_STATE_QUIESCE;
+ goto out_unlock;
if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
- ret = ccw_device_cancel_halt_clear(cdev);
- if (ret == -EBUSY) {
- ccw_device_set_timeout(cdev, HZ/10);
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+ while (ret == -EBUSY) {
+ cdev->private->state = DEV_STATE_QUIESCE;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->state != DEV_STATE_QUIESCE);
+ spin_lock_irq(sch->lock);
+ }
+ ret = cio_disable_subchannel(sch);
}
- cio_disable_subchannel(sch);
+out_unlock:
+ spin_unlock_irq(sch->lock);
}
-static int io_subchannel_get_status(struct subchannel *sch)
+static void io_subchannel_shutdown(struct subchannel *sch)
{
- struct schib schib;
-
- if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
- return CIO_GONE;
- if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
- return CIO_REVALIDATE;
- if (!sch->lpm)
- return CIO_NO_PATH;
- return CIO_OPER;
+ io_subchannel_quiesce(sch);
}
static int device_is_disconnected(struct ccw_device *cdev)
@@ -1575,20 +1276,16 @@ static void ccw_device_schedule_recovery(void)
static int purge_fn(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_device_private *priv = cdev->private;
- int unreg;
+ struct ccw_dev_id *id = &cdev->private->dev_id;
spin_lock_irq(cdev->ccwlock);
- unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
- (priv->state == DEV_STATE_OFFLINE);
+ if (is_blacklisted(id->ssid, id->devno) &&
+ (cdev->private->state == DEV_STATE_OFFLINE)) {
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+ id->devno);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ }
spin_unlock_irq(cdev->ccwlock);
- if (!unreg)
- goto out;
- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
- priv->dev_id.devno);
- ccw_device_schedule_sch_unregister(cdev);
-
-out:
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;
@@ -1630,91 +1327,169 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_NOT_OPER;
}
-static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+enum io_sch_action {
+ IO_SCH_UNREG,
+ IO_SCH_ORPH_UNREG,
+ IO_SCH_ATTACH,
+ IO_SCH_UNREG_ATTACH,
+ IO_SCH_ORPH_ATTACH,
+ IO_SCH_REPROBE,
+ IO_SCH_VERIFY,
+ IO_SCH_DISC,
+ IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cio_update_schib(sch)) {
+ /* Not operational. */
+ if (!cdev)
+ return IO_SCH_UNREG;
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ return IO_SCH_UNREG;
+ return IO_SCH_ORPH_UNREG;
+ }
+ /* Operational. */
+ if (!cdev)
+ return IO_SCH_ATTACH;
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_ORPH_ATTACH;
+ }
+ if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ return IO_SCH_UNREG;
+ return IO_SCH_DISC;
+ }
+ if (device_is_disconnected(cdev))
+ return IO_SCH_REPROBE;
+ if (cdev->online)
+ return IO_SCH_VERIFY;
+ return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
- int event, ret, disc;
unsigned long flags;
- enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
+ enum io_sch_action action;
+ int rc = -EAGAIN;
spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
cdev = sch_get_cdev(sch);
- disc = device_is_disconnected(cdev);
- if (disc && slow) {
- /* Disconnected devices are evaluated directly only.*/
- spin_unlock_irqrestore(sch->lock, flags);
- return 0;
- }
- /* No interrupt after machine check - kill pending timers. */
- if (cdev)
- ccw_device_set_timeout(cdev, 0);
- if (!disc && !slow) {
- /* Non-disconnected devices are evaluated on the slow path. */
- spin_unlock_irqrestore(sch->lock, flags);
- return -EAGAIN;
+ if (cdev && work_pending(&cdev->private->todo_work))
+ goto out_unlock;
+ action = sch_get_action(sch);
+ CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, process,
+ action);
+ /* Perform immediate actions while holding the lock. */
+ switch (action) {
+ case IO_SCH_REPROBE:
+ /* Trigger device recognition. */
+ ccw_device_trigger_reprobe(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_VERIFY:
+ /* Trigger path verification. */
+ io_subchannel_verify(sch);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_DISC:
+ ccw_device_set_disconnected(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ ccw_device_set_disconnected(cdev);
+ break;
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_UNREG:
+ if (cdev)
+ ccw_device_set_notoper(cdev);
+ break;
+ case IO_SCH_NOP:
+ rc = 0;
+ goto out_unlock;
+ default:
+ break;
}
- event = io_subchannel_get_status(sch);
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
- sch->schid.ssid, sch->schid.sch_no, event,
- disc ? "disconnected" : "normal",
- slow ? "slow" : "fast");
- /* Analyze subchannel status. */
- action = NONE;
- switch (event) {
- case CIO_NO_PATH:
- if (disc) {
- /* Check if paths have become available. */
- action = REPROBE;
- break;
- }
- /* fall through */
- case CIO_GONE:
- /* Ask driver what to do with device. */
- if (io_subchannel_notify(sch, event))
- action = DISC;
- else
- action = UNREGISTER;
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* All other actions require process context. */
+ if (!process)
+ goto out;
+ /* Handle attached ccw device. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ /* Move ccw device to orphanage. */
+ rc = ccw_device_move_to_orph(cdev);
+ if (rc)
+ goto out;
break;
- case CIO_REVALIDATE:
- /* Device will be removed, so no notify necessary. */
- if (disc)
- /* Reprobe because immediate unregister might block. */
- action = REPROBE;
- else
- action = UNREGISTER_PROBE;
+ case IO_SCH_UNREG_ATTACH:
+ /* Unregister ccw device. */
+ ccw_device_unregister(cdev);
break;
- case CIO_OPER:
- if (disc)
- /* Get device operational again. */
- action = REPROBE;
+ default:
break;
}
- /* Perform action. */
- ret = 0;
+ /* Handle subchannel. */
switch (action) {
- case UNREGISTER:
- case UNREGISTER_PROBE:
- ccw_device_set_notoper(cdev);
- /* Unregister device (will use subchannel lock). */
- spin_unlock_irqrestore(sch->lock, flags);
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_UNREG:
css_sch_device_unregister(sch);
- spin_lock_irqsave(sch->lock, flags);
break;
- case REPROBE:
+ case IO_SCH_ORPH_ATTACH:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_ATTACH:
+ dev_id.ssid = sch->schid.ssid;
+ dev_id.devno = sch->schib.pmcw.dev;
+ cdev = get_ccwdev_by_dev_id(&dev_id);
+ if (!cdev) {
+ sch_create_and_recog_new_device(sch);
+ break;
+ }
+ rc = ccw_device_move_to_sch(cdev, sch);
+ if (rc) {
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ goto out;
+ }
+ spin_lock_irqsave(sch->lock, flags);
ccw_device_trigger_reprobe(cdev);
- break;
- case DISC:
- ccw_device_set_disconnected(cdev);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
break;
default:
break;
}
- spin_unlock_irqrestore(sch->lock, flags);
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
+ return 0;
- return ret;
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+out:
+ return rc;
}
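
The -EAGAIN contract documented above means a caller running in atomic context simply retries later from process context. A minimal sketch, assuming the css evaluation machinery is the caller (in-tree, the call arrives indirectly via sch->driver->sch_event from css.c):

/* Sketch: a first attempt may run in atomic context; on -EAGAIN the
 * subchannel is queued for re-evaluation in process context. */
static void evaluate_sch_sketch(struct subchannel *sch)
{
	if (io_subchannel_sch_event(sch, 0) == -EAGAIN)
		css_schedule_eval(sch->schid);
}
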
#ifdef CONFIG_CCW_CONSOLE
@@ -1744,10 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
- rc = io_subchannel_recog(cdev, sch);
- if (rc)
- return rc;
-
+ io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
@@ -1763,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
rc = 0;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
- return 0;
+ return rc;
}
struct ccw_device *
@@ -1919,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
- if (work_pending(&cdev->private->kick_work))
+ if (work_pending(&cdev->private->todo_work))
return -EAGAIN;
/* Fail while device is being set online/offline. */
if (atomic_read(&cdev->private->onoff))
@@ -2005,7 +1777,6 @@ static int ccw_device_pm_thaw(struct device *dev)
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- int ret;
if (cio_is_console(sch->schid))
goto out;
@@ -2015,22 +1786,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
spin_lock_irq(sch->lock);
cdev->private->flags.resuming = 1;
- ret = ccw_device_recognition(cdev);
+ ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
- if (ret) {
- CIO_MSG_EVENT(0, "Couldn't start recognition for device "
- "0.%x.%04x (ret=%d)\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- spin_lock_irq(sch->lock);
- cdev->private->state = DEV_STATE_DISCONNECTED;
- spin_unlock_irq(sch->lock);
- /* notify driver after the resume cb */
- goto out;
- }
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED);
-
out:
cdev->private->flags.resuming = 0;
}
@@ -2040,7 +1799,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_BOXED;
if (ccw_device_notify(cdev, CIO_BOXED))
return 0;
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
}
@@ -2049,7 +1808,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_DISCONNECTED;
if (ccw_device_notify(cdev, CIO_GONE))
return 0;
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
}
@@ -2094,9 +1853,7 @@ static int ccw_device_pm_restore(struct device *dev)
/* check if the device type has changed */
if (!ccw_device_test_sense_data(cdev)) {
ccw_device_update_sense_data(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
ret = -ENODEV;
goto out_unlock;
}
@@ -2140,7 +1897,7 @@ out_disc_unlock:
goto out_restore;
out_unreg_unlock:
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
ret = -ENODEV;
out_unlock:
spin_unlock_irq(sch->lock);
@@ -2205,6 +1962,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
return sch->schid;
}
+static void ccw_device_todo(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ enum cdev_todo todo;
+
+ priv = container_of(work, struct ccw_device_private, todo_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ /* Find out todo. */
+ spin_lock_irq(cdev->ccwlock);
+ todo = priv->todo;
+ priv->todo = CDEV_TODO_NOTHING;
+ CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+ priv->dev_id.ssid, priv->dev_id.devno, todo);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Perform todo. */
+ switch (todo) {
+ case CDEV_TODO_ENABLE_CMF:
+ cmf_reenable(cdev);
+ break;
+ case CDEV_TODO_REBIND:
+ ccw_device_do_unbind_bind(cdev);
+ break;
+ case CDEV_TODO_REGISTER:
+ io_subchannel_register(cdev);
+ break;
+ case CDEV_TODO_UNREG_EVAL:
+ if (!sch_is_pseudo_sch(sch))
+ css_schedule_eval(sch->schid);
+ /* fall-through */
+ case CDEV_TODO_UNREG:
+ if (sch_is_pseudo_sch(sch))
+ ccw_device_unregister(cdev);
+ else
+ ccw_device_call_sch_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+ CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ todo);
+ if (cdev->private->todo >= todo)
+ return;
+ cdev->private->todo = todo;
+ /* Get workqueue ref. */
+ if (!get_device(&cdev->dev))
+ return;
+ if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&cdev->dev);
+ }
+}
+
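
A minimal usage sketch for the helper above. The hard rules are that the ccwdev lock is held and that enum cdev_todo values are ordered by priority (assumed from the "todo >= todo" comparison; the enum itself is defined in io_sch.h, outside this hunk).

/* Sketch: request unregistration; a lower-priority todo issued
 * afterwards (e.g. CDEV_TODO_REBIND) would be silently dropped. */
static void request_unreg_sketch(struct ccw_device *cdev)
{
	spin_lock_irq(cdev->ccwlock);
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irq(cdev->ccwlock);
}
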
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842..bcfe13e4263 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */
- DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
DEV_STATE_CMFUPDATE,
+ DEV_STATE_STEAL_LOCK,
/* last element! */
NR_DEV_STATES
};
@@ -81,17 +81,16 @@ void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-void ccw_device_do_unbind_bind(struct work_struct *);
-void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
-int ccw_device_recognition(struct ccw_device *);
+void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +98,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
-void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_id_done(struct ccw_device *, int);
/* Function prototypes for path grouping stuff. */
-void ccw_device_sense_pgid_start(struct ccw_device *);
-void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
-void ccw_device_sense_pgid_done(struct ccw_device *, int);
-
void ccw_device_verify_start(struct ccw_device *);
-void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
void ccw_device_verify_done(struct ccw_device *, int);
void ccw_device_disband_start(struct ccw_device *);
-void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
void ccw_device_disband_done(struct ccw_device *, int);
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9e..ae760658a13 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
sch = to_subchannel(cdev->dev.parent);
- ccw_device_set_timeout(cdev, 0);
- cio_disable_subchannel(sch);
+ if (cio_disable_subchannel(sch))
+ state = DEV_STATE_NOT_OPER;
/*
* Now that we tried recognition, we have performed device selection
* through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
}
switch (state) {
case DEV_STATE_NOT_OPER:
- CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
- "subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
if (!cdev->online) {
ccw_device_update_sense_data(cdev);
- /* Issue device info message. */
- CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
- "CU Type/Mod = %04X/%02X, Dev Type/Mod "
- "= %04X/%02X\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- cdev->id.cu_type, cdev->id.cu_model,
- cdev->id.dev_type, cdev->id.dev_model);
break;
}
cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
} else {
ccw_device_update_sense_data(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
return;
case DEV_STATE_BOXED:
- CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
- " subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
if (cdev->id.cu_type != 0) { /* device was recognized before */
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_BOXED;
@@ -343,28 +325,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}
-static void cmf_reenable_delayed(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- cmf_reenable(cdev);
-}
-
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
if (ccw_device_notify(cdev, CIO_OPER)) {
/* Reenable channel measurements, if needed. */
- PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
return;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
/*
@@ -392,14 +362,14 @@ ccw_device_done(struct ccw_device *cdev, int state)
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_GONE))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
@@ -409,7 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_NO_PATH))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
@@ -425,107 +395,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
}
-static int cmp_pgid(struct pgid *p1, struct pgid *p2)
-{
- char *c1;
- char *c2;
-
- c1 = (char *)p1;
- c2 = (char *)p2;
-
- return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
-}
-
-static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
-{
- int i;
- int last;
-
- last = 0;
- for (i = 0; i < 8; i++) {
- if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
- /* No PGID yet */
- continue;
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET) {
- /* First non-zero PGID */
- last = i;
- continue;
- }
- if (cmp_pgid(&cdev->private->pgid[i],
- &cdev->private->pgid[last]) == 0)
- /* Non-conflicting PGIDs */
- continue;
-
- /* PGID mismatch, can't pathgroup. */
- CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
- "0.%x.%04x, can't pathgroup\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- cdev->private->options.pgroup = 0;
- return;
- }
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET)
- /* No previous pgid found */
- memcpy(&cdev->private->pgid[0],
- &channel_subsystems[0]->global_pgid,
- sizeof(struct pgid));
- else
- /* Use existing pgid */
- memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
- sizeof(struct pgid));
-}
-
-/*
- * Function called from device_pgid.c after sense path ground has completed.
- */
-void
-ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
- cdev->private->options.pgroup = 0;
- break;
- case 0: /* success */
- case -EACCES: /* partial success, some paths not operational */
- /* Check if all pgids are equal or 0. */
- __ccw_device_get_common_pgid(cdev);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_done(cdev, DEV_STATE_BOXED);
- return;
- default:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
- }
- /* Start Path Group verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
-}
-
/*
* Start device recognition.
*/
-int
-ccw_device_recognition(struct ccw_device *cdev)
+void ccw_device_recognition(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret != 0)
- /* Couldn't enable the subchannel for i/o. Sick device. */
- return ret;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
* We used to start here with a sense pgid to find out whether a device
@@ -537,32 +412,33 @@ ccw_device_recognition(struct ccw_device *cdev)
*/
cdev->private->flags.recog_done = 0;
cdev->private->state = DEV_STATE_SENSE_ID;
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
ccw_device_sense_id_start(cdev);
- return 0;
}
/*
- * Handle timeout in device recognition.
+ * Handle events for states that use the ccw request infrastructure.
*/
-static void
-ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ switch (e) {
+ case DEV_EVENT_NOTOPER:
+ ccw_request_notoper(cdev);
break;
- case -ENODEV:
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ case DEV_EVENT_INTERRUPT:
+ ccw_request_handler(cdev);
+ break;
+ case DEV_EVENT_TIMEOUT:
+ ccw_request_timeout(cdev);
break;
default:
- ccw_device_set_timeout(cdev, 3*HZ);
+ break;
}
}
-
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
@@ -571,21 +447,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
if (cio_update_schib(sch)) {
- cdev->private->flags.donotify = 0;
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
+ err = -ENODEV;
+ goto callback;
}
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
if (cdev->private->flags.doverify) {
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
return;
}
+callback:
switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, just set online. */
- cdev->private->options.pgroup = 0;
case 0:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
@@ -604,18 +477,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
}
break;
case -ETIME:
+ case -EUSERS:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
+ case -EACCES:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+ break;
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- if (cdev->online) {
- ccw_device_set_timeout(cdev, 0);
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- } else
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
@@ -640,17 +515,9 @@ ccw_device_online(struct ccw_device *cdev)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
- /* Do we want to do path grouping? */
- if (!cdev->private->options.pgroup) {
- /* Start initial path verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
- return 0;
- }
- /* Do a SensePGID first. */
- cdev->private->state = DEV_STATE_SENSE_PGID;
- ccw_device_sense_pgid_start(cdev);
+ /* Start initial path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
return 0;
}
@@ -666,7 +533,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
break;
default:
cdev->private->flags.donotify = 0;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -703,7 +569,7 @@ ccw_device_offline(struct ccw_device *cdev)
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
- if (!cdev->private->options.pgroup) {
+ if (!cdev->private->flags.pgroup) {
/* No, set state offline immediately. */
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
@@ -715,43 +581,13 @@ ccw_device_offline(struct ccw_device *cdev)
}
/*
- * Handle timeout in device online/offline process.
- */
-static void
-ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
-{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_done(cdev, DEV_STATE_BOXED);
- break;
- case -ENODEV:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- break;
- default:
- ccw_device_set_timeout(cdev, 3*HZ);
- }
-}
-
-/*
- * Handle not oper event in device recognition.
- */
-static void
-ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
-}
-
-/*
* Handle not operational event in non-special state.
*/
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
if (!ccw_device_notify(cdev, CIO_GONE))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
}
@@ -802,11 +638,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
}
/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (cdev->online) {
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ else
+ ccw_device_online_verify(cdev, dev_event);
+ } else
+ css_schedule_eval(sch->schid);
+}
+
+/*
* Got an interrupt for a normal io (state online).
*/
static void
@@ -904,12 +756,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
*/
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
- /* Retry Basic Sense if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- ccw_device_do_sense(cdev, irb);
- return;
- }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +779,6 @@ call_handler:
}
static void
-ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- irb = (struct irb *) __LC_IRB;
- /* Accumulate status. We don't do basic sense. */
- ccw_device_accumulate_irb(cdev, irb);
- /* Remember to clear irb to avoid residuals. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try to start delayed device verification. */
- ccw_device_online_verify(cdev, 0);
- /* Note: Don't call handler for cio initiated clear! */
-}
-
-static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
@@ -1004,32 +835,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- switch (dev_event) {
- case DEV_EVENT_INTERRUPT:
- irb = (struct irb *) __LC_IRB;
- /* Check for unsolicited interrupt. */
- if ((scsw_stctl(&irb->scsw) ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
- (!scsw_cc(&irb->scsw)))
- /* FIXME: we should restart stlck here, but this
- * is extremely unlikely ... */
- goto out_wakeup;
-
- ccw_device_accumulate_irb(cdev, irb);
- /* We don't care about basic sense etc. */
- break;
- default: /* timeout */
- break;
- }
-out_wakeup:
- wake_up(&cdev->private->wait_q);
-}
-
-static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
@@ -1038,10 +843,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
-
cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
ccw_device_sense_id_start(cdev);
}
@@ -1072,22 +873,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
	/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
- if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_orphanage);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+ css_schedule_eval(sch->schid);
+ else
ccw_device_start_id(cdev, 0);
}
-static void
-ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
- * An interrupt in state offline means a previous disable was not
+ * An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
@@ -1113,10 +912,7 @@ static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
- if (dev_event == DEV_EVENT_NOTOPER)
- cdev->private->state = DEV_STATE_NOT_OPER;
- else
- cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
@@ -1126,17 +922,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- cdev->private->state = DEV_STATE_OFFLINE;
- wake_up(&cdev->private->wait_q);
- break;
- case -ENODEV:
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ } else {
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
- break;
- default:
- ccw_device_set_timeout(cdev, HZ/10);
}
}
@@ -1150,50 +940,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
}
/*
- * Bug operation action.
- */
-static void
-ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
-{
- CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
- "0.%x.%04x\n", cdev->private->state, dev_event,
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- BUG();
-}
-
-/*
* device statemachine
*/
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
- [DEV_EVENT_INTERRUPT] = ccw_device_bug,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
@@ -1209,24 +986,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
- [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
- },
- /* states to wait for i/o completion before doing something */
- [DEV_STATE_CLEAR_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
+ [DEV_EVENT_INTERRUPT] = ccw_device_nop,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
},
+ /* states to wait for i/o completion before doing something */
[DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1243,13 +1014,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
- [DEV_EVENT_TIMEOUT] = ccw_device_bug,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1035,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
},
+ [DEV_STATE_STEAL_LOCK] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
};
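
Dispatch through the table is a plain two-dimensional index on state and event. In essence (a sketch; the in-tree dev_fsm_event helper in device.h may carry additional checks):

/* Sketch: route an event to the handler for the current state. */
static inline void dev_fsm_event_sketch(struct ccw_device *cdev,
					enum dev_event dev_event)
{
	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}
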
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34..78a0b43862c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
/*
- * drivers/s390/cio/device_id.c
+ * CCW device SENSE ID I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Sense ID functions.
+ * Copyright IBM Corp. 2002,2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
#include <asm/ccwdev.h>
-#include <asm/delay.h>
+#include <asm/setup.h>
#include <asm/cio.h>
-#include <asm/lowcore.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define SENSE_ID_RETRIES 256
+#define SENSE_ID_TIMEOUT (10 * HZ)
+#define SENSE_ID_MIN_LEN 4
+#define SENSE_ID_BASIC_LEN 7
+
/**
- * vm_vdev_to_cu_type - Convert vm virtual device into control unit type
- * for certain devices.
- * @class: virtual device class
- * @type: virtual device type
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
*
- * Returns control unit type if a match was made or %0xffff otherwise.
+ * Return 0 on success, non-zero otherwise.
*/
-static int vm_vdev_to_cu_type(int class, int type)
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
static struct {
int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
};
int i;
- for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
- if (class == vm_devices[i].class && type == vm_devices[i].type)
- return vm_devices[i].cu_type;
+ /* Special case for osa devices. */
+ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+ senseid->cu_type = 0x3088;
+ senseid->cu_model = 0x60;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+ if (diag->vrdcvcla == vm_devices[i].class &&
+ diag->vrdcvtyp == vm_devices[i].type) {
+ senseid->cu_type = vm_devices[i].cu_type;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ }
- return 0xffff;
+ return -ENODEV;
}
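
The conversion step can be pictured in isolation. A sketch using the OSA special case handled above; field names follow struct diag210 from asm/diag.h, and the wrapper name is hypothetical.

/* Sketch: class 0x02/type 0x20 maps to CU type 0x3088, model 0x60. */
static int osa_senseid_sketch(struct senseid *senseid)
{
	struct diag210 diag = {
		.vrdcvcla = 0x02,	/* virtual device class */
		.vrdcvtyp = 0x20,	/* virtual device type */
	};

	return diag210_to_senseid(senseid, &diag);
}
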
/**
- * diag_get_dev_info - retrieve device information via DIAG X'210'
- * @devno: device number
- * @ps: pointer to sense ID data area
+ * diag_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
*/
-static int diag_get_dev_info(u16 devno, struct senseid *ps)
+static int diag210_get_dev_info(struct ccw_device *cdev)
{
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
struct diag210 diag_data;
- int ccode;
-
- CIO_TRACE_EVENT (4, "VMvdinf");
-
- diag_data = (struct diag210) {
- .vrdcdvno = devno,
- .vrdclen = sizeof (diag_data),
- };
-
- ccode = diag210 (&diag_data);
- if ((ccode == 0) || (ccode == 2)) {
- ps->reserved = 0xff;
-
- /* Special case for osa devices. */
- if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
- ps->cu_type = 0x3088;
- ps->cu_model = 0x60;
- return 0;
- }
- ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
- diag_data.vrdcvtyp);
- if (ps->cu_type != 0xffff)
- return 0;
- }
-
- CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
- "vdev class : %02X, vdev type : %04X \n ... "
- "rdev class : %02X, rdev type : %04X, "
- "rdev model: %02X\n",
- devno, ccode,
- diag_data.vrdcvcla, diag_data.vrdcvtyp,
- diag_data.vrdcrccl, diag_data.vrdccrty,
- diag_data.vrdccrmd);
-
+ int rc;
+
+ if (dev_id->ssid != 0)
+ return -ENODEV;
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id->devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ CIO_TRACE_EVENT(4, "diag210");
+ CIO_HEX_EVENT(4, &rc, sizeof(rc));
+ CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+ if (rc != 0 && rc != 2)
+ goto err_failed;
+ if (diag210_to_senseid(senseid, &diag_data))
+ goto err_unknown;
+ return 0;
+
+err_unknown:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+ dev_id->ssid, dev_id->devno);
+ return -ENODEV;
+err_failed:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+ dev_id->ssid, dev_id->devno, rc);
return -ENODEV;
}
/*
- * Start Sense ID helper function.
- * Try to obtain the 'control unit'/'device type' information
- * associated with the subchannel.
+ * Initialize SENSE ID data.
*/
-static int
-__ccw_device_sense_id_start(struct ccw_device *cdev)
-{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Setup sense channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_ID;
- ccw->cda = (__u32) __pa (&cdev->private->senseid);
- ccw->count = sizeof (struct senseid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- cdev->private->senseid.cu_type = 0xFFFF;
- if ((sch->opm & cdev->private->imask) != 0 &&
- cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- }
- return ret;
-}
-
-void
-ccw_device_sense_id_start(struct ccw_device *cdev)
+static void snsid_init(struct ccw_device *cdev)
{
- int ret;
-
- memset (&cdev->private->senseid, 0, sizeof (struct senseid));
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ cdev->private->flags.esid = 0;
+ memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+ cdev->private->senseid.cu_type = 0xffff;
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense ID was received.
+ * Check for complete SENSE ID data.
*/
-static int
-ccw_device_check_sense_id(struct ccw_device *cdev)
+static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
-
- /* Check the error cases. */
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense ID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
- /*
- * if the device doesn't support the SenseID
- * command further retries wouldn't help ...
- * NB: We don't check here for intervention required like we
- * did before, because tape devices with no tape inserted
- * may present this status *in conjunction with* the
- * sense id information. So, for intervention required,
- * we use the "whack it until it talks" strategy...
- */
- CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
- "0.%x.%04x reports cmd reject\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
+ struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+ if (cdev->private->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+ if (cdev->private->senseid.reserved != 0xff)
return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
- "lpum %02X, cnt %02d, sns :"
- " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+ cdev->private->flags.esid = 1;
+ return 0;
- lpm = to_io_private(sch)->orb.cmd.lpm;
- if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
- "on subchannel 0.%x.%04x is "
- "'not operational'\n", lpm,
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
- return -EACCES;
- }
-
- /* Did we get a proper answer ? */
- if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
- cdev->private->senseid.reserved == 0xFF) {
- if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
- cdev->private->flags.esid = 1;
- return 0; /* Success */
- }
-
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
- "subchannel 0.%x.%04x returns status %02X%02X\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no,
- irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+out_restart:
+ snsid_init(cdev);
return -EAGAIN;
}
/*
- * Got interrupt for Sense ID.
+ * Process SENSE ID request result.
*/
-void
-ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = (struct irb *) __LC_IRB;
- /* Retry sense id, if needed. */
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+ /* Try diag 0x210 fallback on z/VM. */
+ snsid_init(cdev);
+ if (diag210_get_dev_info(cdev) == 0) {
+ rc = 0;
+ vm = 1;
}
- return;
}
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- ret = ccw_device_check_sense_id(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
- case 0: /* Sense id succeeded. */
- case -ETIME: /* Sense id stopped by timeout. */
- ccw_device_sense_id_done(cdev, ret);
- break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* try again. */
- ret = __ccw_device_sense_id_start(cdev);
- if (ret == 0 || ret == -EBUSY)
- break;
- /* fall through. */
- default: /* Sense ID failed. Try asking VM. */
- if (MACHINE_IS_VM)
- ret = diag_get_dev_info(cdev->private->dev_id.devno,
- &cdev->private->senseid);
- else
- /*
- * If we can't couldn't identify the device type we
- * consider the device "not operational".
- */
- ret = -ENODEV;
+ CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+ "%04x/%02x%s\n", id->ssid, id->devno, rc,
+ senseid->cu_type, senseid->cu_model, senseid->dev_type,
+ senseid->dev_model, vm ? " (diag210)" : "");
+ ccw_device_sense_id_done(cdev, rc);
+}
- ccw_device_sense_id_done(cdev, ret);
- break;
- }
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Data setup. */
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+ cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->cp = cp;
+ req->timeout = SENSE_ID_TIMEOUT;
+ req->maxretries = SENSE_ID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->check = snsid_check;
+ req->callback = snsid_callback;
+ ccw_request_start(cdev);
}
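
The setup above is the template every converted operation in this series follows. Condensed into one sketch (struct ccw_request fields per io_sch.h; the timeout and retry values here are the SENSE ID ones, other users pick their own):

/* Sketch: generic shape of starting one internal channel program
 * through the ccw request infrastructure (subchannel lock held). */
static void start_request_sketch(struct ccw_device *cdev, struct ccw1 *cp)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp = cp;				/* channel program */
	req->timeout = 10 * HZ;			/* per-try timeout */
	req->maxretries = 256;			/* overall retry budget */
	req->lpm = sch->schib.pmcw.pam & sch->opm; /* candidate paths */
	/* A real user also sets req->check and req->callback here. */
	ccw_request_start(cdev);	/* result arrives via callback */
}
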
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a29..6da84543dfe 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+ cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
/**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+ return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+ return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
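
A sketch of how a driver would combine the option flags with the new predicates (CCWDEV_* flags per asm/ccwdev.h; the function name is hypothetical):

/* Sketch: request path grouping and multipathing, then check what
 * path verification actually established. */
static int enable_device_sketch(struct ccw_device *cdev)
{
	int rc;

	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	rc = ccw_device_set_online(cdev);
	if (rc)
		return rc;
	if (!ccw_device_is_pathgroup(cdev) || !ccw_device_is_multipath(cdev))
		dev_info(&cdev->dev, "running without full path grouping\n");
	return 0;
}
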
+/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter; value is only used if no I/O is
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY ||
- cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
return sch->lpm;
}
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
- void *buf, *buf2;
- unsigned long flags;
- struct subchannel *sch;
- int ret;
+struct stlck_data {
+ struct completion done;
+ int rc;
+};
- if (!cdev)
- return -ENODEV;
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+ struct stlck_data *sdata = data;
- if (cdev->drv && !cdev->private->options.force)
- return -EINVAL;
+ sdata->rc = rc;
+ complete(&sdata->done);
+}
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT(2, "stl lock");
- CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct stlck_data data;
+ u8 *buffer;
+ int rc;
- buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf2) {
- kfree(buf);
- return -ENOMEM;
+ /* Check if steal lock operation is valid for this device. */
+ if (cdev->drv) {
+ if (!cdev->private->options.force)
+ return -EINVAL;
}
- spin_lock_irqsave(sch->lock, flags);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret)
- goto out_unlock;
- /*
- * Setup ccw. We chain an unconditional reserve and a release so we
- * only break the lock.
- */
- cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
- cdev->private->iccws[0].cda = (__u32) __pa(buf);
- cdev->private->iccws[0].count = 32;
- cdev->private->iccws[0].flags = CCW_FLAG_CC;
- cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
- cdev->private->iccws[1].cda = (__u32) __pa(buf2);
- cdev->private->iccws[1].count = 32;
- cdev->private->iccws[1].flags = 0;
- ret = cio_start(sch, cdev->private->iccws, 0);
- if (ret) {
- cio_disable_subchannel(sch); //FIXME: return code?
+ buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ init_completion(&data.done);
+ data.rc = -EIO;
+ spin_lock_irq(sch->lock);
+ rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+ if (rc)
goto out_unlock;
+ /* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+ ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+ spin_unlock_irq(sch->lock);
+ /* Wait for operation to finish. */
+ if (wait_for_completion_interruptible(&data.done)) {
+ /* Got a signal. */
+ spin_lock_irq(sch->lock);
+ ccw_request_cancel(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_for_completion(&data.done);
}
- cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- spin_unlock_irqrestore(sch->lock, flags);
- wait_event(cdev->private->wait_q,
- cdev->private->irb.scsw.cmd.actl == 0);
- spin_lock_irqsave(sch->lock, flags);
- cio_disable_subchannel(sch); //FIXME: return code?
- if ((cdev->private->irb.scsw.cmd.dstat !=
- (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
- (cdev->private->irb.scsw.cmd.cstat != 0))
- ret = -EIO;
- /* Clear irb. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = data.rc;
+ /* Check results. */
+ spin_lock_irq(sch->lock);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_BOXED;
out_unlock:
- kfree(buf);
- kfree(buf2);
- spin_unlock_irqrestore(sch->lock, flags);
- return ret;
+ spin_unlock_irq(sch->lock);
+ kfree(buffer);
+
+ return rc;
}
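
The interruptible wait above deserves a note: after a signal, the code cancels the request and then waits unconditionally, because the stlck_data lives on the stack and must not go out of scope before the callback fires. Restated as an isolated idiom (a sketch):

/* Sketch: wait for completion, but never return while the request
 * could still complete against the on-stack stlck_data. */
static int wait_or_cancel_sketch(struct ccw_device *cdev,
				 struct subchannel *sch,
				 struct stlck_data *data)
{
	if (wait_for_completion_interruptible(&data->done)) {
		/* Signal: force the request to finish, then reap it. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data->done);
	}
	return data->rc;
}
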
void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b..aad188e43b4 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,561 @@
/*
- * drivers/s390/cio/device_pgid.c
+ * CCW device PGID and path verification I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Path Group ID functions.
+ * Copyright IBM Corp. 2002,2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
-
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/delay.h>
-#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define PGID_RETRIES 256
+#define PGID_TIMEOUT (10 * HZ)
+
/*
- * Helper function called from interrupt context to decide whether an
- * operation should be tried again.
+ * Process path verification data and report result.
*/
-static int __ccw_device_should_retry(union scsw *scsw)
+static void verify_done(struct ccw_device *cdev, int rc)
{
- /* CC is only valid if start function bit is set. */
- if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
- return 1;
- /* No more activity. For sense and set PGID we stubbornly try again. */
- if (!scsw->cmd.actl)
- return 1;
- return 0;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ int mpath = cdev->private->flags.mpath;
+ int pgroup = cdev->private->flags.pgroup;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ if (sch->config.mp != mpath) {
+ sch->config.mp = mpath;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+ "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+ sch->vpm);
+ ccw_device_verify_done(cdev, rc);
}
/*
- * Start Sense Path Group ID helper function. Used in ccw_device_recog
- * and ccw_device_sense_pgid.
+ * Create channel program to perform a NOOP.
*/
-static int
-__ccw_device_sense_pgid_start(struct ccw_device *cdev)
+static void nop_build_cp(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Return if we already checked on all paths. */
- if (cdev->private->imask == 0)
- return (sch->lpm == 0) ? -ENODEV : -EACCES;
- i = 8 - ffs(cdev->private->imask);
-
- /* Setup sense path group id channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_PGID;
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- /* Try every path multiple times. */
- ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not "
- "operational'\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
-
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- i++;
- }
-
- return ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+ cp->count = 0;
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
}
-void
-ccw_device_sense_pgid_start(struct ccw_device *cdev)
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
{
- int ret;
-
- /* Set a timeout of 60s */
- ccw_device_set_timeout(cdev, 60*HZ);
-
- cdev->private->state = DEV_STATE_SENSE_PGID;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid));
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Adjust lpm. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ if (!req->lpm)
+ goto out_nopath;
+ nop_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense Path Group ID was received.
+ * Adjust NOOP I/O status.
*/
-static int
-__ccw_device_check_sense_pgid(struct ccw_device *cdev)
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+ struct irb *irb, enum io_status status)
{
- struct subchannel *sch;
- struct irb *irb;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons &&
- (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
- /*
- * If the device doesn't support the Sense Path Group ID
- * command further retries wouldn't help ...
- */
- return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
- "lpum %02X, cnt %02d, sns : "
- "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
-
- lpm = to_io_private(sch)->orb.cmd.lpm;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, lpm);
- return -EACCES;
- }
- i = 8 - ffs(cdev->private->imask);
- if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
- "is reserved by someone else\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
- return -EUSERS;
- }
- return 0;
+ /* Only subchannel status might indicate a path error. */
+ if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+ return IO_DONE;
+ return status;
}
/*
- * Got interrupt for Sense Path Group ID.
+ * Process NOOP request result for a single path.
*/
-void
-ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- irb = (struct irb *) __LC_IRB;
-
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw)) {
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
- }
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_sense_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
- case -EOPNOTSUPP: /* Sense Path Group ID not supported */
- ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- ccw_device_sense_pgid_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- /* Fall through. */
- case 0: /* Sense Path Group ID successful. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* Fall through. */
- case -EAGAIN: /* Try again. */
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret != 0 && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
- break;
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_sense_pgid_done(cdev, -EUSERS);
- break;
- }
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ if (rc == 0)
+ sch->vpm |= req->lpm;
+ else if (rc != -EACCES)
+ goto err;
+ req->lpm >>= 1;
+ nop_do(cdev);
+ return;
+
+err:
+ verify_done(cdev, rc);
}
/*
- * Path Group ID helper function.
+ * Create channel program to perform SET PGID on a single path.
*/
-static int
-__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
-
- /* Setup sense path group id channel program. */
- cdev->private->pgid[0].inf.fc = func;
- ccw = cdev->private->iccws;
- if (cdev->private->flags.pgid_single)
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
- else
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
- ccw->cmd_code = CCW_CMD_SET_PGID;
- ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
- }
- /* PGID command failed on this path. */
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+ struct pgid *pgid = &cdev->private->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+ cp->cda = (u32) (addr_t) pgid;
+ cp->count = sizeof(*pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
}
/*
- * Helper function to send a nop ccw down a path.
+ * Perform establish/resign SET PGID on a single path.
*/
-static int __ccw_device_do_nop(struct ccw_device *cdev)
+static void spid_do(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
-
- /* Setup nop channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_NOOP;
- ccw->cda = 0;
- ccw->count = 0;
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
- }
- /* nop command failed on this path. */
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ /* Use next available path that is not already in correct state. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
+ if (!req->lpm)
+ goto out_nopath;
+ /* Channel program setup. */
+ if (req->lpm & sch->opm)
+ fn = SPID_FUNC_ESTABLISH;
+ else
+ fn = SPID_FUNC_RESIGN;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
+static void verify_start(struct ccw_device *cdev);
/*
- * Called from interrupt context to check if a valid answer
- * to Set Path Group ID was received.
+ * Process SET PGID request result for a single path.
*/
-static int
-__ccw_device_check_pgid(struct ccw_device *cdev)
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Set PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm & sch->opm;
+ break;
+ case -EACCES:
+ break;
+ case -EOPNOTSUPP:
+ if (cdev->private->flags.mpath) {
+ /* Try without multipathing. */
+ cdev->private->flags.mpath = 0;
+ goto out_restart;
}
- return -ETIME;
+ /* Try without pathgrouping. */
+ cdev->private->flags.pgroup = 0;
+ goto out_restart;
+ default:
+ goto err;
}
- if (irb->esw.esw0.erw.cons) {
- if (irb->ecw[0] & SNS0_CMD_REJECT)
- return -EOPNOTSUPP;
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
- "cnt %02d, "
- "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
- }
- return 0;
+ req->lpm >>= 1;
+ spid_do(cdev);
+ return;
+
+out_restart:
+ verify_start(cdev);
+ return;
+err:
+ verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->callback = spid_callback;
+ spid_do(cdev);
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+ return memcmp((char *) p1 + 1, (char *) p2 + 1,
+ sizeof(struct pgid) - 1);
}
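/*
 * Editor's note: byte 0 of struct pgid is the inf union -- the SPID
 * function code on output, the SNID path state on input -- and so it
 * legitimately differs from path to path. pgid_cmp() therefore skips it
 * and compares only the identifying bytes; two sensed PGIDs that differ
 * solely in path state still count as the same path group.
 */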
/*
- * Called from interrupt context to check the path status after a nop has
- * been send.
+ * Determine pathgroup state from PGID data.
*/
-static int __ccw_device_check_nop(struct ccw_device *cdev)
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, int *reserved, int *reset)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry NOP if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+
+ *mismatch = 0;
+ *reserved = 0;
+ *reset = 0;
+ for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+ *reserved = 1;
+ if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
+ /* A PGID was reset. */
+ *reset = 1;
+ continue;
}
- return -ETIME;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
+ if (!first) {
+ first = pgid;
+ continue;
+ }
+ if (pgid_cmp(pgid, first) != 0)
+ *mismatch = 1;
}
- return 0;
+ if (!first)
+ first = &channel_subsystems[0]->global_pgid;
+ *p = first;
}
-static void
-__ccw_device_verify_start(struct ccw_device *cdev)
+static u8 pgid_to_vpm(struct ccw_device *cdev)
{
- struct subchannel *sch;
- __u8 func;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Repeat for all paths. */
- for (; cdev->private->imask; cdev->private->imask >>= 1,
- cdev->private->iretry = 5) {
- if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
- /* Path not available, try next. */
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int i;
+ int lpm;
+ u8 vpm = 0;
+
+ /* Set VPM bits for paths which are already in the target state. */
+ for (i = 0; i < 8; i++) {
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- if (cdev->private->options.pgroup) {
- if (sch->opm & cdev->private->imask)
- func = SPID_FUNC_ESTABLISH;
- else
- func = SPID_FUNC_RESIGN;
- ret = __ccw_device_do_pgid(cdev, func);
- } else
- ret = __ccw_device_do_nop(cdev);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if (ret == 0 || ret == -EBUSY)
- return;
- /* Permanent path failure, try next. */
+ pgid = &cdev->private->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+ } else {
+ if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+ continue;
+ }
+ if (cdev->private->flags.mpath) {
+ if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+ continue;
+ } else {
+ if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+ continue;
+ }
+ vpm |= lpm;
}
- /* Done with all paths. */
- ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
+
+ return vpm;
}
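/*
 * Editor's note: a summary of the per-path target state checked by
 * pgid_to_vpm() above (no additional logic):
 *
 *	- path in opm:      state1 must be SNID_STATE1_GROUPED
 *	- path not in opm:  state1 must be SNID_STATE1_UNGROUPED
 *	- flags.mpath set:  state3 must be SNID_STATE3_MULTI_PATH
 *	- flags.mpath off:  state3 must be SNID_STATE3_SINGLE_PATH
 *
 * Only paths already matching both conditions are pre-set in vpm; the
 * rest are brought into line by the subsequent SET PGID pass.
 */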
-
-/*
- * Got interrupt for Set Path Group ID.
- */
-void
-ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ int i;
- irb = (struct irb *) __LC_IRB;
+ for (i = 0; i < 8; i++)
+ memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_verify_start(cdev);
- return;
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+ int reserved = 0;
+ int reset = 0;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+ if (reserved)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+ else {
+ sch->vpm = pgid_to_vpm(cdev);
+ pgid_fill(cdev, pgid);
}
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- if (cdev->private->options.pgroup)
- ret = __ccw_device_check_pgid(cdev);
- else
- ret = __ccw_device_check_nop(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
+out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+ "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
+ cdev->private->pgid_valid_mask, sch->vpm, mismatch,
+ reserved, reset);
+ switch (rc) {
case 0:
- /* Path verification ccw finished successfully, update lpm. */
- sch->vpm |= sch->opm & cdev->private->imask;
- /* Go on with next path. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ /* Anything left to do? */
+ if (sch->vpm == sch->schib.pmcw.pam) {
+ verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+ return;
+ }
+ /* Perform path-grouping. */
+ spid_start(cdev);
break;
case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Set Path Group ID.
- */
- if (cdev->private->flags.pgid_single)
- cdev->private->options.pgroup = 0;
- else
- cdev->private->flags.pgid_single = 1;
- /* Retry */
- sch->vpm = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_verify_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_verify_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ /* Path-grouping not supported. */
+ cdev->private->flags.pgroup = 0;
+ cdev->private->flags.mpath = 0;
+ verify_start(cdev);
break;
+ default:
+ verify_done(cdev, rc);
}
}
-void
-ccw_device_verify_start(struct ccw_device *cdev)
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+ cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Adjust lpm if paths are not set in pam. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ if (!req->lpm)
+ goto out_nopath;
+ snid_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+}
- cdev->private->flags.pgid_single = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
+/*
+ * Process SENSE PGID request result for a single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (rc == 0)
+ cdev->private->pgid_valid_mask |= req->lpm;
+ else if (rc != -EACCES)
+ goto err;
+ req->lpm >>= 1;
+ snid_do(cdev);
+ return;
+
+err:
+ snid_done(cdev, rc);
+}
- /* Start with empty vpm. */
- sch->vpm = 0;
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw_dev_id *devid = &cdev->private->dev_id;
- /* Get current pam. */
- if (cio_update_schib(sch)) {
- ccw_device_verify_done(cdev, -ENODEV);
- return;
+ sch->vpm = 0;
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ if (cdev->private->flags.pgroup) {
+ CIO_TRACE_EVENT(4, "snid");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->callback = snid_callback;
+ snid_do(cdev);
+ } else {
+ CIO_TRACE_EVENT(4, "nop");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->filter = nop_filter;
+ req->callback = nop_callback;
+ nop_do(cdev);
}
- /* After 60s path verification is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
- __ccw_device_verify_start(cdev);
}
-static void
-__ccw_device_disband_start(struct ccw_device *cdev)
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- while (cdev->private->imask != 0) {
- if (sch->lpm & cdev->private->imask) {
- ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
- if (ret == 0)
- return;
- }
- cdev->private->iretry = 5;
- cdev->private->imask >>= 1;
- }
- ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+ CIO_TRACE_EVENT(4, "vrfy");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ /*
+ * Initialize pathgroup and multipath state with target values.
+ * They may change in the course of path verification.
+ */
+ cdev->private->flags.pgroup = cdev->private->options.pgroup;
+ cdev->private->flags.mpath = cdev->private->options.mpath;
+ cdev->private->flags.doverify = 0;
+ verify_start(cdev);
}
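/*
 * Editor's note: the verification flow implemented above, for
 * orientation (function names as defined in this file):
 *
 *	ccw_device_verify_start -> verify_start
 *	  flags.pgroup set:   snid_do <-> snid_callback (one path at a
 *	                      time), then snid_done
 *	    snid_done: all paths grouped -> verify_done
 *	               otherwise         -> spid_start, then
 *	                      spid_do <-> spid_callback (one path at a time)
 *	  flags.pgroup clear: nop_do  <-> nop_callback  (one path at a time)
 *	  -EOPNOTSUPP from SET PGID restarts verify_start with mpath, then
 *	  pgroup, disabled; verify_done() reports the result and commits
 *	  the multipath setting to the subchannel.
 */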
/*
- * Got interrupt for Unset Path Group ID.
+ * Process disband SET PGID request result.
*/
-void
-ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ cdev->private->flags.mpath = 0;
+ if (sch->config.mp) {
+ sch->config.mp = 0;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+ rc);
+ ccw_device_disband_done(cdev, rc);
+}
- irb = (struct irb *) __LC_IRB;
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_TRACE_EVENT(4, "disb");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->callback = disband_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
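/*
 * Editor's note: unlike verification, disband issues one request whose
 * req->lpm covers all operational paths at once; path selection and
 * retry are left to the generic ccw_request logic (ccwreq.c). This is
 * safe because pgid_fill() made all eight pgid slots identical during
 * verification, so spid_build_cp() picks the same ID whichever path's
 * slot it indexes via ffs(req->lpm).
 */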
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_disband_start(cdev);
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
- case 0: /* disband successful. */
- ccw_device_disband_done(cdev, ret);
- break;
- case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Unset Path Group ID.
- */
- cdev->private->flags.pgid_single = 1;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_disband_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_disband_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_disband_start(cdev);
- break;
- }
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+ cp[0].count = 32;
+ cp[0].flags = CCW_FLAG_CC;
+ cp[1].cmd_code = CCW_CMD_RELEASE;
+ cp[1].cda = (u32) (addr_t) buf2;
+ cp[1].count = 32;
+ cp[1].flags = 0;
+ req->cp = cp;
}
-void
-ccw_device_disband_start(struct ccw_device *cdev)
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
- /* After 60s disbanding is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ ccw_device_stlck_done(cdev, data, rc);
+}
- cdev->private->flags.pgid_single = 0;
- cdev->private->iretry = 5;
- cdev->private->imask = 0x80;
- __ccw_device_disband_start(cdev);
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+ void *buf2)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ CIO_TRACE_EVENT(4, "stlck");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->data = data;
+ req->callback = stlck_callback;
+ stlck_build_cp(cdev, buf1, buf2);
+ ccw_request_start(cdev);
}
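/*
 * Editor's note: stlck_build_cp() chains two CCWs -- CCW_FLAG_CC on the
 * first makes the channel continue with the second command once the
 * unconditional-reserve STLCK completes, so the RELEASE runs in the
 * same I/O operation. The 32-byte counts correspond to the buffers the
 * caller passes in as buf1/buf2.
 */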
+
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee241..66d8066ef22 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
-
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20..d72ae4c93af 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H
+#include <linux/types.h>
#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include "css.h"
/*
* command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
#define MAX_CIWS 8
/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+ IO_DONE,
+ IO_RUNNING,
+ IO_STATUS_ERROR,
+ IO_PATH_ERROR,
+ IO_REJECTED,
+ IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ */
+struct ccw_request {
+ struct ccw1 *cp;
+ unsigned long timeout;
+ u16 maxretries;
+ u8 lpm;
+ int (*check)(struct ccw_device *, void *);
+ enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+ enum io_status);
+ void (*callback)(struct ccw_device *, void *, int);
+ void *data;
+ /* These fields are used internally. */
+ u16 mask;
+ u16 retries;
+ int drc;
+ int cancel:1;
+ int done:1;
+} __attribute__((packed));
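/*
 * Editor's note: a hedged sketch of how a ccw_request is populated,
 * modeled on spid_start()/verify_start() in device_pgid.c; my_start(),
 * my_callback() and my_cp are placeholder names, not existing symbols:
 */
static void my_callback(struct ccw_device *cdev, void *data, int rc);

static void my_start(struct ccw_device *cdev, struct ccw1 *my_cp)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));	/* leave optional callbacks unset */
	req->timeout = 10 * HZ;		/* deadline per started I/O */
	req->maxretries = 256;		/* retries per path */
	req->lpm = 0x80;		/* begin with the first path */
	req->cp = my_cp;		/* channel program to run */
	req->callback = my_callback;	/* final result arrives here */
	ccw_request_start(cdev);
}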
+
+/*
* sense-id response buffer layout
*/
struct senseid {
@@ -82,32 +131,43 @@ struct senseid {
struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed, aligned(4)));
+enum cdev_todo {
+ CDEV_TODO_NOTHING,
+ CDEV_TODO_ENABLE_CMF,
+ CDEV_TODO_REBIND,
+ CDEV_TODO_REGISTER,
+ CDEV_TODO_UNREG,
+ CDEV_TODO_UNREG_EVAL,
+};
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
int state; /* device state */
atomic_t onoff;
- unsigned long registered;
struct ccw_dev_id dev_id; /* device id */
struct subchannel_id schid; /* subchannel number */
- u8 imask; /* lpm mask for SNID/SID/SPGID */
- int iretry; /* retry counter SNID/SID/SPGID */
+ struct ccw_request req; /* internal I/O request */
+ int iretry;
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
unsigned int pgroup:1; /* do path grouping */
unsigned int force:1; /* allow forced online */
+ unsigned int mpath:1; /* do multipathing */
} __attribute__ ((packed)) options;
struct {
- unsigned int pgid_single:1; /* use single path for Set PGID */
unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
- unsigned int intretry:1; /* retry internal operation */
unsigned int resuming:1; /* recognition while resume */
+ unsigned int pgroup:1; /* pathgroup is set up */
+ unsigned int mpath:1; /* multipathing is set up */
+ unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -115,7 +175,8 @@ struct ccw_device_private {
struct senseid senseid; /* SenseID info */
struct pgid pgid[8]; /* path group IDs per chpid*/
struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
- struct work_struct kick_work;
+ struct work_struct todo_work;
+ enum cdev_todo todo;
wait_queue_head_t wait_q;
struct timer_list timer;
void *cmb; /* measurement information */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1294876bf7b..20836eff88c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -102,6 +102,7 @@ static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
@@ -282,6 +283,7 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
+ * @special: 1 to send the message with the special-command bit set
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
@@ -289,7 +291,8 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+ unsigned int special)
{
typedef struct { char _[length]; } msgblock;
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
@@ -299,6 +302,9 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+ if (special == 1)
+ reg0 |= 0x400000UL;
+
asm volatile (
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
@@ -312,13 +318,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
struct ap_queue_status status;
- status = __ap_send(qid, psmid, msg, length);
+ status = __ap_send(qid, psmid, msg, length, 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return -EINVAL;
default: /* Device is gone. */
return -ENODEV;
}
@@ -1008,7 +1016,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
}
status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
- msg, sizeof(msg));
+ msg, sizeof(msg), 0);
if (status.response_code != AP_RESPONSE_NORMAL) {
rc = -ENODEV;
goto out_free;
@@ -1163,16 +1171,19 @@ ap_config_timeout(unsigned long ptr)
static inline void ap_schedule_poll_timer(void)
{
ktime_t hr_time;
+
+ spin_lock_bh(&ap_poll_timer_lock);
if (ap_using_interrupts() || ap_suspend_flag)
- return;
+ goto out;
if (hrtimer_is_queued(&ap_poll_timer))
- return;
+ goto out;
if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
hr_time = ktime_set(0, poll_timeout);
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
- return;
+out:
+ spin_unlock_bh(&ap_poll_timer_lock);
}
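/*
 * Editor's note: the new ap_poll_timer_lock appears to close a race
 * between concurrent callers of ap_schedule_poll_timer(): without it,
 * two CPUs could both see the hrtimer as expired and both try to
 * forward and restart it. The _bh variant suggests the timer is only
 * touched from process and softirq context.
 */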
/**
@@ -1243,7 +1254,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
/* Start the next request on the queue. */
ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length);
+ ap_msg->message, ap_msg->length, ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
atomic_inc(&ap_poll_requests);
@@ -1261,6 +1272,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
*flags |= 2;
break;
case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
return -EINVAL;
default:
return -ENODEV;
@@ -1302,7 +1314,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
if (list_empty(&ap_dev->requestq) &&
ap_dev->queue_count < ap_dev->queue_depth) {
status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length);
+ ap_msg->message, ap_msg->length,
+ ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
list_add_tail(&ap_msg->list, &ap_dev->pendingq);
@@ -1317,6 +1330,7 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
ap_dev->requestq_count++;
ap_dev->total_request_count++;
return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
case AP_RESPONSE_MESSAGE_TOO_BIG:
ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
return -EINVAL;
@@ -1658,6 +1672,7 @@ int __init ap_module_init(void)
*/
if (MACHINE_IS_VM)
poll_timeout = 1500000;
+ spin_lock_init(&ap_poll_timer_lock);
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index a3536224180..4785d07cd44 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -87,6 +87,7 @@ struct ap_queue_status {
#define AP_RESPONSE_INDEX_TOO_BIG 0x11
#define AP_RESPONSE_NO_FIRST_PART 0x13
#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
/*
* Known device types
@@ -96,8 +97,8 @@ struct ap_queue_status {
#define AP_DEVICE_TYPE_PCIXCC 5
#define AP_DEVICE_TYPE_CEX2A 6
#define AP_DEVICE_TYPE_CEX2C 7
-#define AP_DEVICE_TYPE_CEX2A2 8
-#define AP_DEVICE_TYPE_CEX2C2 9
+#define AP_DEVICE_TYPE_CEX3A 8
+#define AP_DEVICE_TYPE_CEX3C 9
/*
* AP reset flag states
@@ -161,12 +162,25 @@ struct ap_message {
size_t length; /* Message length. */
void *private; /* ap driver private pointer. */
+ unsigned int special:1; /* Used for special commands. */
};
#define AP_DEVICE(dt) \
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before using it; otherwise stale field contents
+ * might result in unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+ ap_msg->psmid = 0;
+ ap_msg->length = 0;
+ ap_msg->special = 0;
+}
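/*
 * Editor's note: the usage pattern in the zcrypt drivers below --
 * initialize the ap_message first, then allocate and fill the buffer:
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 *	if (!ap_msg.message)
 *		return -ENOMEM;
 */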
+
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 65b6a96afe6..0d4d18bdd45 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -299,9 +299,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
*/
static int zcrypt_open(struct inode *inode, struct file *filp)
{
- lock_kernel();
atomic_inc(&zcrypt_open_count);
- unlock_kernel();
return 0;
}
@@ -1009,6 +1007,10 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
zcrypt_count_type(ZCRYPT_CEX2C));
len += sprintf(resp_buff + len, "CEX2A count: %d\n",
zcrypt_count_type(ZCRYPT_CEX2A));
+ len += sprintf(resp_buff + len, "CEX3C count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX3C));
+ len += sprintf(resp_buff + len, "CEX3A count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX3A));
len += sprintf(resp_buff + len, "requestq count: %d\n",
zcrypt_requestq_count());
len += sprintf(resp_buff + len, "pendingq count: %d\n",
@@ -1017,7 +1019,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
atomic_read(&zcrypt_open_count));
zcrypt_status_mask(workarea);
len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
+ "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
resp_buff+len, workarea, AP_DEVICES);
zcrypt_qdepth_mask(workarea);
len += sprinthx("Waiting work element counts",
@@ -1095,8 +1097,9 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
* '0' for no device, '1' for PCICA, '2' for PCICC,
* '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
* '5' for CEX2C, '6' for CEX2A,
+ * '7' for CEX3C and '8' for CEX3A
*/
- if (*ptr >= '0' && *ptr <= '6')
+ if (*ptr >= '0' && *ptr <= '8')
j++;
else if (*ptr == 'd' || *ptr == 'D')
zcrypt_disable_card(j++);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 1d1ec74dadb..8e7ffbf2466 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -71,6 +71,8 @@ struct ica_z90_status {
#define ZCRYPT_PCIXCC_MCL3 4
#define ZCRYPT_CEX2C 5
#define ZCRYPT_CEX2A 6
+#define ZCRYPT_CEX3C 7
+#define ZCRYPT_CEX3A 8
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 326ea08f67c..c6fb0aa8950 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -39,17 +39,24 @@
#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE
#define CEX2A_SPEED_RATING 970
+#define CEX3A_SPEED_RATING 900 /* FIXME: needs finetuning */
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE
+
#define CEX2A_CLEANUP_TIME (15*HZ)
+#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
static struct ap_device_id zcrypt_cex2a_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
{ /* end of list */ },
};
@@ -298,6 +305,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -335,6 +343,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -373,31 +382,45 @@ static struct zcrypt_ops zcrypt_cex2a_ops = {
*/
static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev;
- int rc;
-
- zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zdev)
- return -ENOMEM;
- zdev->ap_dev = ap_dev;
- zdev->ops = &zcrypt_cex2a_ops;
- zdev->online = 1;
- zdev->user_space_type = ZCRYPT_CEX2A;
- zdev->type_string = "CEX2A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->short_crt = 1;
- zdev->speed_rating = CEX2A_SPEED_RATING;
- ap_dev->reply = &zdev->reply;
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
- if (rc)
- goto out_free;
- return 0;
-
-out_free:
- ap_dev->private = NULL;
- zcrypt_device_free(zdev);
+ struct zcrypt_device *zdev = NULL;
+ int rc = 0;
+
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_CEX2A:
+ zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->user_space_type = ZCRYPT_CEX2A;
+ zdev->type_string = "CEX2A";
+ zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX2A_SPEED_RATING;
+ break;
+ case AP_DEVICE_TYPE_CEX3A:
+ zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->user_space_type = ZCRYPT_CEX3A;
+ zdev->type_string = "CEX3A";
+ zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX3A_SPEED_RATING;
+ break;
+ }
+ if (zdev != NULL) {
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_cex2a_ops;
+ zdev->online = 1;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ }
+ if (rc) {
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ }
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 17ba81b58c7..e78df3671ca 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -281,6 +281,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -318,6 +319,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f4b0c479543..a23726a0735 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -483,6 +483,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -521,6 +522,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 5677b40e4ac..79c120578e6 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -43,10 +43,13 @@
#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE
-#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
+#define PCIXCC_MCL2_SPEED_RATING 7870
#define PCIXCC_MCL3_SPEED_RATING 7870
-#define CEX2C_SPEED_RATING 8540
+#define CEX2C_SPEED_RATING 7000
+#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -72,7 +75,7 @@ struct response_type {
static struct ap_device_id zcrypt_pcixcc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
{ /* end of list */ },
};
@@ -326,6 +329,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
+ if (memcmp(function_code, "US", 2) == 0)
+ ap_msg->special = 1;
+ else
+ ap_msg->special = 0;
+
/* copy data block */
if (xcRB->request_data_length &&
copy_from_user(req_data, xcRB->request_data_address,
@@ -688,6 +696,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -727,6 +736,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -766,6 +776,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -805,6 +816,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -972,6 +984,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
} __attribute__((packed)) *reply;
int rc, i;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -1016,14 +1029,15 @@ out_free:
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev;
- int rc;
+ int rc = 0;
zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
zdev->online = 1;
- if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_PCIXCC:
rc = zcrypt_pcixcc_mcl(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
@@ -1041,13 +1055,25 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
}
- } else {
+ break;
+ case AP_DEVICE_TYPE_CEX2C:
zdev->user_space_type = ZCRYPT_CEX2C;
zdev->type_string = "CEX2C";
zdev->speed_rating = CEX2C_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ break;
+ case AP_DEVICE_TYPE_CEX3C:
+ zdev->user_space_type = ZCRYPT_CEX3C;
+ zdev->type_string = "CEX3C";
+ zdev->speed_rating = CEX3C_SPEED_RATING;
+ zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+ break;
+ default:
+ goto out_free;
}
+
rc = zcrypt_pcixcc_rng_supported(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 52c03438dbe..1ba51152f66 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -528,7 +528,7 @@ extern unsigned char ULP_ENABLE[];
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
-/* Layer 2 defintions */
+/* Layer 2 definitions */
#define QETH_PROT_LAYER2 0x08
#define QETH_PROT_TCPIP 0x03
#define QETH_PROT_OSN2 0x0a
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2889e5f2dfd..9d0c941b7d3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -31,6 +31,7 @@
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -80,48 +81,40 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
- struct ccw_device *ccwdev;
+ struct ccw_device *cdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
struct zfcp_unit *unit;
- ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
- if (!ccwdev)
+ cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (!cdev)
return;
- if (ccw_device_set_online(ccwdev))
- goto out_ccwdev;
+ if (ccw_device_set_online(cdev))
+ goto out_ccw_device;
- mutex_lock(&zfcp_data.config_mutex);
- adapter = dev_get_drvdata(&ccwdev->dev);
+ adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
- goto out_unlock;
- zfcp_adapter_get(adapter);
+ goto out_ccw_device;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out_port;
- zfcp_port_get(port);
unit = zfcp_unit_enqueue(port, lun);
if (IS_ERR(unit))
goto out_unit;
- mutex_unlock(&zfcp_data.config_mutex);
zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
zfcp_erp_wait(adapter);
flush_work(&unit->scsi_work);
- mutex_lock(&zfcp_data.config_mutex);
- zfcp_unit_put(unit);
out_unit:
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
out_port:
- zfcp_adapter_put(adapter);
-out_unlock:
- mutex_unlock(&zfcp_data.config_mutex);
-out_ccwdev:
- put_device(&ccwdev->dev);
+ zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+ put_device(&cdev->dev);
return;
}
@@ -167,7 +160,7 @@ static int __init zfcp_module_init(void)
int retval = -ENOMEM;
zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
- sizeof(struct ct_iu_gpn_ft_req));
+ sizeof(struct zfcp_fc_gpn_ft_req));
if (!zfcp_data.gpn_ft_cache)
goto out;
@@ -182,12 +175,14 @@ static int __init zfcp_module_init(void)
goto out_sr_cache;
zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
- sizeof(struct zfcp_gid_pn_data));
+ sizeof(struct zfcp_fc_gid_pn));
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
- mutex_init(&zfcp_data.config_mutex);
- rwlock_init(&zfcp_data.config_lock);
+ zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
+ sizeof(struct zfcp_fc_els_adisc));
+ if (!zfcp_data.adisc_cache)
+ goto out_adisc_cache;
zfcp_data.scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
@@ -200,7 +195,7 @@ static int __init zfcp_module_init(void)
goto out_misc;
}
- retval = zfcp_ccw_register();
+ retval = ccw_driver_register(&zfcp_ccw_driver);
if (retval) {
pr_err("The zfcp device driver could not register with "
"the common I/O layer\n");
@@ -216,6 +211,8 @@ out_ccw_register:
out_misc:
fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
+ kmem_cache_destroy(zfcp_data.adisc_cache);
+out_adisc_cache:
kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
@@ -229,6 +226,20 @@ out:
module_init(zfcp_module_init);
+static void __exit zfcp_module_exit(void)
+{
+ ccw_driver_unregister(&zfcp_ccw_driver);
+ misc_deregister(&zfcp_cfdc_misc);
+ fc_release_transport(zfcp_data.scsi_transport_template);
+ kmem_cache_destroy(zfcp_data.adisc_cache);
+ kmem_cache_destroy(zfcp_data.gid_pn_cache);
+ kmem_cache_destroy(zfcp_data.sr_buffer_cache);
+ kmem_cache_destroy(zfcp_data.qtcb_cache);
+ kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+}
+
+module_exit(zfcp_module_exit);
+
/**
* zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
* @port: pointer to port to search for unit
@@ -238,12 +249,18 @@ module_init(zfcp_module_init);
*/
struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
{
+ unsigned long flags;
struct zfcp_unit *unit;
- list_for_each_entry(unit, &port->unit_list_head, list)
- if ((unit->fcp_lun == fcp_lun) &&
- !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
- return unit;
+ read_lock_irqsave(&port->unit_list_lock, flags);
+ list_for_each_entry(unit, &port->unit_list, list)
+ if (unit->fcp_lun == fcp_lun) {
+ if (!get_device(&unit->sysfs_device))
+ unit = NULL;
+ read_unlock_irqrestore(&port->unit_list_lock, flags);
+ return unit;
+ }
+ read_unlock_irqrestore(&port->unit_list_lock, flags);
return NULL;
}
@@ -257,18 +274,35 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
u64 wwpn)
{
+ unsigned long flags;
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
- if ((port->wwpn == wwpn) &&
- !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE))
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
+ if (port->wwpn == wwpn) {
+ if (!get_device(&port->sysfs_device))
+ port = NULL;
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
return NULL;
}
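/*
 * Editor's note: both lookup helpers above follow the same pattern --
 * walk the list under its read lock and take a get_device() reference
 * before dropping the lock, so the caller holds a stable reference
 * while the list may change concurrently. A failing get_device()
 * (release already under way) makes the lookup report "not found";
 * callers drop the reference with put_device(), as
 * zfcp_init_device_configure() above does for the port.
 */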
-static void zfcp_sysfs_unit_release(struct device *dev)
+/**
+ * zfcp_unit_release - release unit
+ * @dev: pointer to device
+ *
+ * Drops the reference to the associated port and frees the unit once the
+ * last reference to the unit's device is gone.
+ */
+static void zfcp_unit_release(struct device *dev)
{
- kfree(container_of(dev, struct zfcp_unit, sysfs_device));
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
+ sysfs_device);
+
+ put_device(&unit->port->sysfs_device);
+ kfree(unit);
}
/**
@@ -276,43 +310,40 @@ static void zfcp_sysfs_unit_release(struct device *dev)
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* Returns: pointer to enqueued unit on success, ERR_PTR on error
- * Locks: config_mutex must be held to serialize changes to the unit list
*
* Sets up some unit internal structures and creates sysfs entry.
*/
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
+ int retval = -ENOMEM;
- read_lock_irq(&zfcp_data.config_lock);
- if (zfcp_get_unit_by_lun(port, fcp_lun)) {
- read_unlock_irq(&zfcp_data.config_lock);
- return ERR_PTR(-EINVAL);
+ get_device(&port->sysfs_device);
+
+ unit = zfcp_get_unit_by_lun(port, fcp_lun);
+ if (unit) {
+ put_device(&unit->sysfs_device);
+ retval = -EEXIST;
+ goto err_out;
}
- read_unlock_irq(&zfcp_data.config_lock);
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&unit->refcount, 0);
- init_waitqueue_head(&unit->remove_wq);
- INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
+ goto err_out;
unit->port = port;
unit->fcp_lun = fcp_lun;
+ unit->sysfs_device.parent = &port->sysfs_device;
+ unit->sysfs_device.release = zfcp_unit_release;
if (dev_set_name(&unit->sysfs_device, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
- return ERR_PTR(-ENOMEM);
+ goto err_out;
}
- unit->sysfs_device.parent = &port->sysfs_device;
- unit->sysfs_device.release = zfcp_sysfs_unit_release;
- dev_set_drvdata(&unit->sysfs_device, unit);
+ retval = -EINVAL;
- /* mark unit unusable as long as sysfs registration is not complete */
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+ INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
spin_lock_init(&unit->latencies.lock);
unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -324,50 +355,30 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
if (device_register(&unit->sysfs_device)) {
put_device(&unit->sysfs_device);
- return ERR_PTR(-EINVAL);
+ goto err_out;
}
if (sysfs_create_group(&unit->sysfs_device.kobj,
- &zfcp_sysfs_unit_attrs)) {
- device_unregister(&unit->sysfs_device);
- return ERR_PTR(-EINVAL);
- }
+ &zfcp_sysfs_unit_attrs))
+ goto err_out_put;
- zfcp_unit_get(unit);
+ write_lock_irq(&port->unit_list_lock);
+ list_add_tail(&unit->list, &port->unit_list);
+ write_unlock_irq(&port->unit_list_lock);
- write_lock_irq(&zfcp_data.config_lock);
- list_add_tail(&unit->list, &port->unit_list_head);
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
- write_unlock_irq(&zfcp_data.config_lock);
-
- zfcp_port_get(port);
-
return unit;
-}
-/**
- * zfcp_unit_dequeue - dequeue unit
- * @unit: pointer to zfcp_unit
- *
- * waits until all work is done on unit and removes it then from the unit->list
- * of the associated port.
- */
-void zfcp_unit_dequeue(struct zfcp_unit *unit)
-{
- wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
- write_lock_irq(&zfcp_data.config_lock);
- list_del(&unit->list);
- write_unlock_irq(&zfcp_data.config_lock);
- zfcp_port_put(unit->port);
- sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
+err_out_put:
device_unregister(&unit->sysfs_device);
+err_out:
+ put_device(&port->sysfs_device);
+ return ERR_PTR(retval);
}
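/*
 * Editor's note: a minimal caller sketch for the reworked
 * zfcp_unit_enqueue() -- a duplicate LUN now surfaces as
 * ERR_PTR(-EEXIST) instead of -EINVAL:
 *
 *	unit = zfcp_unit_enqueue(port, fcp_lun);
 *	if (IS_ERR(unit))
 *		return PTR_ERR(unit);
 */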
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
- /* must only be called with zfcp_data.config_mutex taken */
adapter->pool.erp_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.erp_req)
@@ -405,9 +416,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
if (!adapter->pool.status_read_data)
return -ENOMEM;
- adapter->pool.gid_pn_data =
+ adapter->pool.gid_pn =
mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
- if (!adapter->pool.gid_pn_data)
+ if (!adapter->pool.gid_pn)
return -ENOMEM;
return 0;
@@ -415,7 +426,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
- /* zfcp_data.config_mutex must be held */
if (adapter->pool.erp_req)
mempool_destroy(adapter->pool.erp_req);
if (adapter->pool.scsi_req)
@@ -428,8 +438,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.status_read_req);
if (adapter->pool.status_read_data)
mempool_destroy(adapter->pool.status_read_data);
- if (adapter->pool.gid_pn_data)
- mempool_destroy(adapter->pool.gid_pn_data);
+ if (adapter->pool.gid_pn)
+ mempool_destroy(adapter->pool.gid_pn);
}
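
The allocate/free pair above follows a strict convention: zfcp_allocate_low_mem_buffers() stops at the first failed mempool_create call and reports -ENOMEM, while zfcp_free_low_mem_buffers() checks every pointer so the same teardown runs safely after any partial allocation. A small userspace sketch of that structure, with illustrative names and plain malloc/free standing in for mempools:

#include <stdlib.h>

struct pools { void *erp_req, *scsi_req, *gid_pn; };

static int pools_alloc(struct pools *p)
{
	p->erp_req = malloc(64);
	if (!p->erp_req)
		return -1;	/* caller is expected to run pools_free() */
	p->scsi_req = malloc(64);
	if (!p->scsi_req)
		return -1;
	p->gid_pn = malloc(64);
	if (!p->gid_pn)
		return -1;
	return 0;
}

static void pools_free(struct pools *p)
{
	/* each destroy is guarded, so this works after partial setup */
	if (p->erp_req)
		free(p->erp_req);
	if (p->scsi_req)
		free(p->scsi_req);
	if (p->gid_pn)
		free(p->gid_pn);
}

int main(void)
{
	struct pools p = { 0 };	/* zeroed, like the kzalloc'd adapter */

	if (pools_alloc(&p) == 0) {
		/* ... use the pools ... */
	}
	pools_free(&p);		/* single cleanup path either way */
	return 0;
}

This is the same shape the new zfcp_adapter_enqueue() takes further below: all setup failures funnel into one "failed:" label that calls the common teardown routine.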
/**
@@ -497,53 +507,56 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct ccw_device
*
- * Returns: 0 if a new adapter was successfully enqueued
- * -ENOMEM if alloc failed
+ * Returns: struct zfcp_adapter*
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
- * locks: config_mutex must be held to serialize changes to the adapter list
*/
-int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
- /*
- * Note: It is safe to release the list_lock, as any list changes
- * are protected by the config_mutex, which must be held to get here
- */
+ if (!get_device(&ccw_device->dev))
+ return ERR_PTR(-ENODEV);
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
- if (!adapter)
- return -ENOMEM;
+ if (!adapter) {
+ put_device(&ccw_device->dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&adapter->ref);
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
- atomic_set(&adapter->refcount, 0);
+
+ INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+ INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
if (zfcp_qdio_setup(adapter))
- goto qdio_failed;
+ goto failed;
if (zfcp_allocate_low_mem_buffers(adapter))
- goto low_mem_buffers_failed;
+ goto failed;
if (zfcp_reqlist_alloc(adapter))
- goto low_mem_buffers_failed;
+ goto failed;
if (zfcp_dbf_adapter_register(adapter))
- goto debug_register_failed;
+ goto failed;
if (zfcp_setup_adapter_work_queue(adapter))
- goto work_queue_failed;
+ goto failed;
if (zfcp_fc_gs_setup(adapter))
- goto generic_services_failed;
+ goto failed;
+
+ rwlock_init(&adapter->port_list_lock);
+ INIT_LIST_HEAD(&adapter->port_list);
- init_waitqueue_head(&adapter->remove_wq);
init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
- INIT_LIST_HEAD(&adapter->port_list_head);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
@@ -553,83 +566,85 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
rwlock_init(&adapter->abort_lock);
if (zfcp_erp_thread_setup(adapter))
- goto erp_thread_failed;
-
- INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
- INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
+ goto failed;
adapter->service_level.seq_print = zfcp_print_sl;
- /* mark adapter unusable as long as sysfs registration is not complete */
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-
dev_set_drvdata(&ccw_device->dev, adapter);
if (sysfs_create_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs))
- goto sysfs_failed;
-
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+ goto failed;
if (!zfcp_adapter_scsi_register(adapter))
- return 0;
+ return adapter;
-sysfs_failed:
- zfcp_erp_thread_kill(adapter);
-erp_thread_failed:
- zfcp_fc_gs_destroy(adapter);
-generic_services_failed:
+failed:
+ zfcp_adapter_unregister(adapter);
+ return ERR_PTR(-ENOMEM);
+}
+
+void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
+{
+ struct ccw_device *cdev = adapter->ccw_device;
+
+ cancel_work_sync(&adapter->scan_work);
+ cancel_work_sync(&adapter->stat_work);
zfcp_destroy_adapter_work_queue(adapter);
-work_queue_failed:
+
+ zfcp_fc_wka_ports_force_offline(adapter->gs);
+ zfcp_adapter_scsi_unregister(adapter);
+ sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+
+ zfcp_erp_thread_kill(adapter);
zfcp_dbf_adapter_unregister(adapter->dbf);
-debug_register_failed:
- dev_set_drvdata(&ccw_device->dev, NULL);
- kfree(adapter->req_list);
-low_mem_buffers_failed:
- zfcp_free_low_mem_buffers(adapter);
-qdio_failed:
zfcp_qdio_destroy(adapter->qdio);
- kfree(adapter);
- return -ENOMEM;
+
+ zfcp_ccw_adapter_put(adapter); /* final put to release */
}
/**
- * zfcp_adapter_dequeue - remove the adapter from the resource list
- * @adapter: pointer to struct zfcp_adapter which should be removed
+ * zfcp_adapter_release - remove the adapter from the resource list
+ * @ref: pointer to struct kref
* locks: adapter list write lock is assumed to be held by caller
*/
-void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
+void zfcp_adapter_release(struct kref *ref)
{
- int retval = 0;
- unsigned long flags;
+ struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
+ ref);
+ struct ccw_device *cdev = adapter->ccw_device;
- cancel_work_sync(&adapter->stat_work);
- zfcp_fc_wka_ports_force_offline(adapter->gs);
- sysfs_remove_group(&adapter->ccw_device->dev.kobj,
- &zfcp_sysfs_adapter_attrs);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
- /* sanity check: no pending FSF requests */
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- retval = zfcp_reqlist_isempty(adapter);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
- if (!retval)
- return;
-
zfcp_fc_gs_destroy(adapter);
- zfcp_erp_thread_kill(adapter);
- zfcp_destroy_adapter_work_queue(adapter);
- zfcp_dbf_adapter_unregister(adapter->dbf);
zfcp_free_low_mem_buffers(adapter);
- zfcp_qdio_destroy(adapter->qdio);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter);
+ put_device(&cdev->dev);
}
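
zfcp_adapter_release() is the kref release callback: the final zfcp_ccw_adapter_put() invokes it, and container_of() recovers the adapter from the embedded kref before the last frees run. A single-threaded userspace sketch of the pattern, with kref emulated by a plain counter and all names illustrative:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int count; };

static void kref_init(struct kref *k) { k->count = 1; }
static void kref_get(struct kref *k) { k->count++; }
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->count == 0)
		release(k);	/* last put triggers the release callback */
}

struct adapter {
	struct kref ref;	/* embedded, like zfcp_adapter->ref */
	const char *name;
};

static void adapter_release(struct kref *ref)
{
	struct adapter *a = container_of(ref, struct adapter, ref);

	printf("releasing %s\n", a->name);
	free(a);
}

int main(void)
{
	struct adapter *a = malloc(sizeof(*a));

	kref_init(&a->ref);			/* count = 1 */
	a->name = "adapter0";
	kref_get(&a->ref);			/* e.g. an ERP action */
	kref_put(&a->ref, adapter_release);	/* count = 1, no release */
	kref_put(&a->ref, adapter_release);	/* count = 0, frees object */
	return 0;
}

Compared with the refcount/waitqueue scheme this patch removes, nobody has to wait for the count to reach zero: whichever context drops the last reference performs the destruction itself.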
-static void zfcp_sysfs_port_release(struct device *dev)
+/**
+ * zfcp_device_unregister - remove port, unit from system
+ * @dev: reference to device which is to be removed
+ * @grp: related reference to attribute group
+ *
+ * Helper function to unregister port, unit from system
+ */
+void zfcp_device_unregister(struct device *dev,
+ const struct attribute_group *grp)
{
- kfree(container_of(dev, struct zfcp_port, sysfs_device));
+ sysfs_remove_group(&dev->kobj, grp);
+ device_unregister(dev);
+}
+
+static void zfcp_port_release(struct device *dev)
+{
+ struct zfcp_port *port = container_of(dev, struct zfcp_port,
+ sysfs_device);
+
+ zfcp_ccw_adapter_put(port->adapter);
+ kfree(port);
}
/**
@@ -639,7 +654,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
- * Locks: config_mutex must be held to serialize changes to the port list
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
@@ -649,20 +663,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
+ int retval = -ENOMEM;
- read_lock_irq(&zfcp_data.config_lock);
- if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
- read_unlock_irq(&zfcp_data.config_lock);
- return ERR_PTR(-EINVAL);
+ kref_get(&adapter->ref);
+
+ port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ if (port) {
+ put_device(&port->sysfs_device);
+ retval = -EEXIST;
+ goto err_out;
}
- read_unlock_irq(&zfcp_data.config_lock);
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
- return ERR_PTR(-ENOMEM);
+ goto err_out;
+
+ rwlock_init(&port->unit_list_lock);
+ INIT_LIST_HEAD(&port->unit_list);
- init_waitqueue_head(&port->remove_wq);
- INIT_LIST_HEAD(&port->unit_list_head);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -671,58 +689,38 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
-
- /* mark port unusable as long as sysfs registration is not complete */
- atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
- atomic_set(&port->refcount, 0);
+ port->sysfs_device.parent = &adapter->ccw_device->dev;
+ port->sysfs_device.release = zfcp_port_release;
if (dev_set_name(&port->sysfs_device, "0x%016llx",
(unsigned long long)wwpn)) {
kfree(port);
- return ERR_PTR(-ENOMEM);
+ goto err_out;
}
- port->sysfs_device.parent = &adapter->ccw_device->dev;
- port->sysfs_device.release = zfcp_sysfs_port_release;
- dev_set_drvdata(&port->sysfs_device, port);
+ retval = -EINVAL;
if (device_register(&port->sysfs_device)) {
put_device(&port->sysfs_device);
- return ERR_PTR(-EINVAL);
+ goto err_out;
}
if (sysfs_create_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_port_attrs)) {
- device_unregister(&port->sysfs_device);
- return ERR_PTR(-EINVAL);
- }
+ &zfcp_sysfs_port_attrs))
+ goto err_out_put;
- zfcp_port_get(port);
+ write_lock_irq(&adapter->port_list_lock);
+ list_add_tail(&port->list, &adapter->port_list);
+ write_unlock_irq(&adapter->port_list_lock);
- write_lock_irq(&zfcp_data.config_lock);
- list_add_tail(&port->list, &adapter->port_list_head);
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
- atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
+ atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
- write_unlock_irq(&zfcp_data.config_lock);
-
- zfcp_adapter_get(adapter);
return port;
-}
-/**
- * zfcp_port_dequeue - dequeues a port from the port list of the adapter
- * @port: pointer to struct zfcp_port which should be removed
- */
-void zfcp_port_dequeue(struct zfcp_port *port)
-{
- write_lock_irq(&zfcp_data.config_lock);
- list_del(&port->list);
- write_unlock_irq(&zfcp_data.config_lock);
- wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
- cancel_work_sync(&port->rport_work); /* usually not necessary */
- zfcp_adapter_put(port->adapter);
- sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
+err_out_put:
device_unregister(&port->sysfs_device);
+err_out:
+ zfcp_ccw_adapter_put(adapter);
+ return ERR_PTR(retval);
}
/**
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e08339428ec..c22cb72a5ae 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -13,28 +13,34 @@
#define ZFCP_MODEL_PRIV 0x4
-static int zfcp_ccw_suspend(struct ccw_device *cdev)
+static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
+struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
-
- if (!adapter)
- return 0;
-
- mutex_lock(&zfcp_data.config_mutex);
+ struct zfcp_adapter *adapter;
+ unsigned long flags;
- zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
- zfcp_erp_wait(adapter);
+ spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+ adapter = dev_get_drvdata(&cdev->dev);
+ if (adapter)
+ kref_get(&adapter->ref);
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+ return adapter;
+}
- mutex_unlock(&zfcp_data.config_mutex);
+void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+{
+ unsigned long flags;
- return 0;
+ spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+ kref_put(&adapter->ref, zfcp_adapter_release);
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}
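
The lookup helper and the put above share one spinlock for a reason: dev_get_drvdata() and kref_get() must happen as a single atomic step, otherwise a concurrent final kref_put() could free the adapter between the two. A userspace sketch of that race-avoidance pattern, with a pthread mutex standing in for zfcp_ccw_adapter_ref_lock and hypothetical names throughout:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;

struct adapter { int refcount; };
static struct adapter *drvdata;	/* stands in for dev_get_drvdata() */

static struct adapter *adapter_get(void)
{
	struct adapter *a;

	pthread_mutex_lock(&ref_lock);
	a = drvdata;			/* lookup ... */
	if (a)
		a->refcount++;		/* ... and get, atomically */
	pthread_mutex_unlock(&ref_lock);
	return a;			/* may be NULL, caller checks */
}

static void adapter_put(struct adapter *a)
{
	pthread_mutex_lock(&ref_lock);
	if (--a->refcount == 0) {	/* final put under the same lock */
		drvdata = NULL;
		free(a);
	}
	pthread_mutex_unlock(&ref_lock);
}

int main(void)
{
	struct adapter *a0 = calloc(1, sizeof(*a0));

	a0->refcount = 1;		/* initial reference */
	drvdata = a0;

	struct adapter *a = adapter_get();
	if (a)
		adapter_put(a);
	adapter_put(a0);	/* last put frees and clears drvdata */
	return 0;
}

In the driver the same trick lets every ccw callback safely convert a cdev into a pinned adapter reference, with NULL signalling "no adapter allocated yet".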
static int zfcp_ccw_activate(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
@@ -46,6 +52,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
zfcp_erp_wait(adapter);
flush_work(&adapter->scan_work);
+ zfcp_ccw_adapter_put(adapter);
+
return 0;
}
@@ -67,28 +75,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
/**
* zfcp_ccw_probe - probe function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer for each FCP
* device found on the current system. This is only a stub to make cio
* work: to allocate adapter resources only for devices actually used,
* the allocation is deferred to the first call to ccw_set_online.
*/
-static int zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *cdev)
{
return 0;
}
/**
* zfcp_ccw_remove - remove function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and removes an adapter
* from the system. Its task is to get rid of all units and ports that
* belong to this adapter; in addition, all resources of this adapter
* are freed.
*/
-static void zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
@@ -96,49 +104,37 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
LIST_HEAD(unit_remove_lh);
LIST_HEAD(port_remove_lh);
- ccw_device_set_offline(ccw_device);
+ ccw_device_set_offline(cdev);
- mutex_lock(&zfcp_data.config_mutex);
- adapter = dev_get_drvdata(&ccw_device->dev);
+ adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
- goto out;
- mutex_unlock(&zfcp_data.config_mutex);
+ return;
- cancel_work_sync(&adapter->scan_work);
-
- mutex_lock(&zfcp_data.config_mutex);
-
- /* this also removes the scsi devices, so call it first */
- zfcp_adapter_scsi_unregister(adapter);
-
- write_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
- list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
+ write_lock_irq(&adapter->port_list_lock);
+ list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+ write_lock(&port->unit_list_lock);
+ list_for_each_entry_safe(unit, u, &port->unit_list, list)
list_move(&unit->list, &unit_remove_lh);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
- &unit->status);
- }
+ write_unlock(&port->unit_list_lock);
list_move(&port->list, &port_remove_lh);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
}
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
- write_unlock_irq(&zfcp_data.config_lock);
+ write_unlock_irq(&adapter->port_list_lock);
+ zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
- list_for_each_entry_safe(port, p, &port_remove_lh, list) {
- list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
- zfcp_unit_dequeue(unit);
- zfcp_port_dequeue(port);
- }
- wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
- zfcp_adapter_dequeue(adapter);
+ list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
+ zfcp_device_unregister(&unit->sysfs_device,
+ &zfcp_sysfs_unit_attrs);
-out:
- mutex_unlock(&zfcp_data.config_mutex);
+ list_for_each_entry_safe(port, p, &port_remove_lh, list)
+ zfcp_device_unregister(&port->sysfs_device,
+ &zfcp_sysfs_port_attrs);
+
+ zfcp_adapter_unregister(adapter);
}
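
Note the two-phase teardown in the new zfcp_ccw_remove(): while the write locks are held, units and ports are only *moved* onto local lists; the slow device_unregister work then runs with no list lock held. A userspace sketch of that idiom with a simple singly linked list and illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct port { int id; struct port *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *port_list;

static void remove_all_ports(void)
{
	struct port *remove_lh, *p, *n;

	/* phase 1: detach everything under the lock -- cheap */
	pthread_mutex_lock(&list_lock);
	remove_lh = port_list;
	port_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* phase 2: slow per-entry teardown, lock released -- the
	 * counterpart of zfcp_device_unregister() in the driver */
	for (p = remove_lh; p; p = n) {
		n = p->next;
		printf("unregistering port %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct port *p = malloc(sizeof(*p));

		p->id = i;
		p->next = port_list;
		port_list = p;
	}
	remove_all_ports();
	return 0;
}

Keeping device_unregister() outside the lock matters because a release callback may sleep, and holding a spinning lock across a sleeping call would be a bug.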
/**
* zfcp_ccw_set_online - set_online function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an
* adapter into state online. The first call will allocate all
@@ -149,23 +145,20 @@ out:
* the SCSI stack, that the QDIO queues will be set up and that the
* adapter will be opened.
*/
-static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter;
- int ret = 0;
-
- mutex_lock(&zfcp_data.config_mutex);
- adapter = dev_get_drvdata(&ccw_device->dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter) {
- ret = zfcp_adapter_enqueue(ccw_device);
- if (ret) {
- dev_err(&ccw_device->dev,
+ adapter = zfcp_adapter_enqueue(cdev);
+
+ if (IS_ERR(adapter)) {
+ dev_err(&cdev->dev,
"Setting up data structures for the "
"FCP adapter failed\n");
- goto out;
+ return PTR_ERR(adapter);
}
- adapter = dev_get_drvdata(&ccw_device->dev);
+ kref_get(&adapter->ref);
}
/* initialize request counter */
@@ -177,58 +170,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccsonl2", NULL);
zfcp_erp_wait(adapter);
-out:
- mutex_unlock(&zfcp_data.config_mutex);
- if (!ret)
- flush_work(&adapter->scan_work);
- return ret;
+
+ flush_work(&adapter->scan_work);
+
+ zfcp_ccw_adapter_put(adapter);
+ return 0;
}
/**
* zfcp_ccw_set_offline - set_offline function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
-static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter;
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
- mutex_lock(&zfcp_data.config_mutex);
- adapter = dev_get_drvdata(&ccw_device->dev);
zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
zfcp_erp_wait(adapter);
- mutex_unlock(&zfcp_data.config_mutex);
+
+ zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_notify - ccw notify function
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
*
* This function gets called by the common i/o layer if an adapter has gone
* or reappeared.
*/
-static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 1;
switch (event) {
case CIO_GONE:
- dev_warn(&adapter->ccw_device->dev,
- "The FCP device has been detached\n");
+ dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
break;
case CIO_NO_PATH:
- dev_warn(&adapter->ccw_device->dev,
+ dev_warn(&cdev->dev,
"The CHPID for the FCP device is offline\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
break;
case CIO_OPER:
- dev_info(&adapter->ccw_device->dev,
- "The FCP device is operational again\n");
+ dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
@@ -236,11 +232,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
"ccnoti4", NULL);
break;
case CIO_BOXED:
- dev_warn(&adapter->ccw_device->dev, "The FCP device "
- "did not respond within the specified time\n");
+ dev_warn(&cdev->dev, "The FCP device did not respond within "
+ "the specified time\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
break;
}
+
+ zfcp_ccw_adapter_put(adapter);
return 1;
}
@@ -250,18 +248,16 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
*/
static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter;
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
- mutex_lock(&zfcp_data.config_mutex);
- adapter = dev_get_drvdata(&cdev->dev);
if (!adapter)
- goto out;
+ return;
zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
-out:
- mutex_unlock(&zfcp_data.config_mutex);
+
+ zfcp_ccw_adapter_put(adapter);
}
struct ccw_driver zfcp_ccw_driver = {
@@ -274,18 +270,7 @@ struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
- .freeze = zfcp_ccw_suspend,
+ .freeze = zfcp_ccw_set_offline,
.thaw = zfcp_ccw_activate,
.restore = zfcp_ccw_activate,
};
-
-/**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init zfcp_ccw_register(void)
-{
- return ccw_driver_register(&zfcp_ccw_driver);
-}
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index ef681dfed0c..f932400e980 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -86,22 +86,17 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
char busid[9];
- struct ccw_device *ccwdev;
- struct zfcp_adapter *adapter = NULL;
+ struct ccw_device *cdev;
+ struct zfcp_adapter *adapter;
snprintf(busid, sizeof(busid), "0.0.%04x", devno);
- ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
- if (!ccwdev)
- goto out;
-
- adapter = dev_get_drvdata(&ccwdev->dev);
- if (!adapter)
- goto out_put;
-
- zfcp_adapter_get(adapter);
-out_put:
- put_device(&ccwdev->dev);
-out:
+ cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (!cdev)
+ return NULL;
+
+ adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ put_device(&cdev->dev);
return adapter;
}
@@ -212,7 +207,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
retval = -ENXIO;
goto free_buffer;
}
- zfcp_adapter_get(adapter);
retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
data_user->control_file);
@@ -245,7 +239,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
free_sg:
zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
adapter_put:
- zfcp_adapter_put(adapter);
+ zfcp_ccw_adapter_put(adapter);
free_buffer:
kfree(data);
no_mem_sense:
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 215b70749e9..84450955ae1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -13,6 +13,7 @@
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
static u32 dbfsize = 4;
@@ -177,8 +178,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
case FSF_QTCB_SEND_ELS:
send_els = (struct zfcp_send_els *)fsf_req->data;
- response->u.els.d_id = qtcb->bottom.support.d_id;
- response->u.els.ls_code = send_els->ls_code >> 24;
+ response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
break;
case FSF_QTCB_ABORT_FCP_CMND:
@@ -348,7 +348,6 @@ static void zfcp_dbf_hba_view_response(char **p,
case FSF_QTCB_SEND_ELS:
zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
- zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
break;
case FSF_QTCB_ABORT_FCP_CMND:
@@ -677,14 +676,14 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
/**
* zfcp_dbf_san_ct_request - trace event for issued CT request
* @fsf_req: request containing issued CT data
+ * @d_id: destination id where ct request is sent to
*/
-void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
{
- struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_wka_port *wka_port = ct->wka_port;
- struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_dbf *dbf = adapter->dbf;
- struct ct_hdr *hdr = sg_virt(ct->req);
+ struct fc_ct_hdr *hdr = sg_virt(ct->req);
struct zfcp_dbf_san_record *r = &dbf->san_buf;
struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
int level = 3;
@@ -695,19 +694,18 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
- r->s_id = fc_host_port_id(adapter->scsi_host);
- r->d_id = wka_port->d_id;
- oct->cmd_req_code = hdr->cmd_rsp_code;
- oct->revision = hdr->revision;
- oct->gs_type = hdr->gs_type;
- oct->gs_subtype = hdr->gs_subtype;
- oct->options = hdr->options;
- oct->max_res_size = hdr->max_res_size;
- oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
+ oct->d_id = d_id;
+ oct->cmd_req_code = hdr->ct_cmd;
+ oct->revision = hdr->ct_rev;
+ oct->gs_type = hdr->ct_fs_type;
+ oct->gs_subtype = hdr->ct_fs_subtype;
+ oct->options = hdr->ct_options;
+ oct->max_res_size = hdr->ct_mr_size;
+ oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
ZFCP_DBF_SAN_MAX_PAYLOAD);
debug_event(dbf->san, level, r, sizeof(*r));
zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
- (void *)hdr + sizeof(struct ct_hdr), oct->len);
+ (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
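
The trace record now stores the destination ID via ntoh24(): an FC D_ID is a 3-byte big-endian field on the wire, widened to a host u32 for logging. A self-contained sketch of the 24-bit conversions (local demo reimplementations, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint32_t ntoh24_demo(const uint8_t p[3])
{
	return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

static void hton24_demo(uint8_t p[3], uint32_t v)
{
	p[0] = (v >> 16) & 0xff;	/* most significant byte first */
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

int main(void)
{
	uint8_t wire[3];

	hton24_demo(wire, 0xFFFFF0);	/* a well-known address, for example */
	printf("d_id = 0x%06x\n", ntoh24_demo(wire));	/* prints 0xfffff0 */
	return 0;
}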
@@ -717,10 +715,9 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
*/
void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
{
- struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_wka_port *wka_port = ct->wka_port;
- struct zfcp_adapter *adapter = wka_port->adapter;
- struct ct_hdr *hdr = sg_virt(ct->resp);
+ struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fc_ct_hdr *hdr = sg_virt(ct->resp);
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_san_record *r = &dbf->san_buf;
struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
@@ -732,25 +729,23 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
- r->s_id = wka_port->d_id;
- r->d_id = fc_host_port_id(adapter->scsi_host);
- rct->cmd_rsp_code = hdr->cmd_rsp_code;
- rct->revision = hdr->revision;
- rct->reason_code = hdr->reason_code;
- rct->expl = hdr->reason_code_expl;
- rct->vendor_unique = hdr->vendor_unique;
- rct->max_res_size = hdr->max_res_size;
- rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
+ rct->cmd_rsp_code = hdr->ct_cmd;
+ rct->revision = hdr->ct_rev;
+ rct->reason_code = hdr->ct_reason;
+ rct->expl = hdr->ct_explan;
+ rct->vendor_unique = hdr->ct_vendor;
+ rct->max_res_size = hdr->ct_mr_size;
+ rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
ZFCP_DBF_SAN_MAX_PAYLOAD);
debug_event(dbf->san, level, r, sizeof(*r));
zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
- (void *)hdr + sizeof(struct ct_hdr), rct->len);
+ (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
static void zfcp_dbf_san_els(const char *tag, int level,
- struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id,
- u8 ls_code, void *buffer, int buflen)
+ struct zfcp_fsf_req *fsf_req, u32 d_id,
+ void *buffer, int buflen)
{
struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_dbf *dbf = adapter->dbf;
@@ -762,9 +757,7 @@ static void zfcp_dbf_san_els(const char *tag, int level,
strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
rec->fsf_reqid = fsf_req->req_id;
rec->fsf_seqno = fsf_req->seq_no;
- rec->s_id = s_id;
- rec->d_id = d_id;
- rec->u.els.ls_code = ls_code;
+ rec->u.els.d_id = d_id;
debug_event(dbf->san, level, rec, sizeof(*rec));
zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
@@ -777,12 +770,11 @@ static void zfcp_dbf_san_els(const char *tag, int level,
*/
void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
{
- struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+ struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+ u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
- zfcp_dbf_san_els("oels", 2, fsf_req,
- fc_host_port_id(els->adapter->scsi_host),
- els->d_id, *(u8 *) sg_virt(els->req),
- sg_virt(els->req), els->req->length);
+ zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
+ sg_virt(els->req), els->req->length);
}
/**
@@ -791,12 +783,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
*/
void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
{
- struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+ struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+ u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
- zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id,
- fc_host_port_id(els->adapter->scsi_host),
- *(u8 *)sg_virt(els->req), sg_virt(els->resp),
- els->resp->length);
+ zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
+ sg_virt(els->resp), els->resp->length);
}
/**
@@ -805,16 +796,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
*/
void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
{
- struct zfcp_adapter *adapter = fsf_req->adapter;
struct fsf_status_read_buffer *buf =
(struct fsf_status_read_buffer *)fsf_req->data;
int length = (int)buf->length -
(int)((void *)&buf->payload - (void *)buf);
- zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id,
- fc_host_port_id(adapter->scsi_host),
- buf->payload.data[0], (void *)buf->payload.data,
- length);
+ zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
+ (void *)buf->payload.data, length);
}
static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
@@ -829,11 +817,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
zfcp_dbf_tag(&p, "tag", r->tag);
zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
- zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
- zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
+ zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -852,7 +839,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
struct zfcp_dbf_san_record_els *els = &r->u.els;
- zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
+ zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
}
return p - out_buf;
}
@@ -870,8 +857,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
unsigned long flags;
- struct fcp_rsp_iu *fcp_rsp;
- char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
+ struct fcp_resp_with_ext *fcp_rsp;
+ struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
+ char *fcp_sns_info = NULL;
int offset = 0, buflen = 0;
spin_lock_irqsave(&dbf->scsi_lock, flags);
@@ -895,20 +883,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
rec->scsi_allowed = scsi_cmnd->allowed;
}
if (fsf_req != NULL) {
- fcp_rsp = (struct fcp_rsp_iu *)
- &(fsf_req->qtcb->bottom.io.fcp_rsp);
- fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
- fcp_sns_info =
- zfcp_get_fcp_sns_info_ptr(fcp_rsp);
-
- rec->rsp_validity = fcp_rsp->validity.value;
- rec->rsp_scsi_status = fcp_rsp->scsi_status;
- rec->rsp_resid = fcp_rsp->fcp_resid;
- if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
- rec->rsp_code = *(fcp_rsp_info + 3);
- if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
- buflen = min((int)fcp_rsp->fcp_sns_len,
- ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
+ fcp_rsp = (struct fcp_resp_with_ext *)
+ &(fsf_req->qtcb->bottom.io.fcp_rsp);
+ fcp_rsp_info = (struct fcp_resp_rsp_info *)
+ &fcp_rsp[1];
+ fcp_sns_info = (char *) &fcp_rsp[1];
+ if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+ fcp_sns_info += fcp_rsp->ext.fr_rsp_len;
+
+ rec->rsp_validity = fcp_rsp->resp.fr_flags;
+ rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
+ rec->rsp_resid = fcp_rsp->ext.fr_resid;
+ if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+ rec->rsp_code = fcp_rsp_info->rsp_code;
+ if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+ buflen = min(fcp_rsp->ext.fr_sns_len,
+ (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
rec->sns_info_len = buflen;
memcpy(rec->sns_info, fcp_sns_info,
min(buflen,
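
The hunk above walks the FCP_RSP IU as defined by common code: a fixed fcp_resp_with_ext header, then optional response info when FCP_RSP_LEN_VAL is set, then optional sense data when FCP_SNS_LEN_VAL is set. A simplified userspace sketch of that layout logic; the structure and flag values here are stand-ins for <scsi/fc/fc_fcp.h>, not the real definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCP_RSP_LEN_VAL_DEMO 0x01	/* response info present */
#define FCP_SNS_LEN_VAL_DEMO 0x02	/* sense data present */

struct fcp_resp_demo {
	uint8_t  fr_flags;
	uint8_t  fr_status;
	uint32_t fr_resid;
	uint32_t fr_sns_len;
	uint32_t fr_rsp_len;
};

static const char *sense_ptr(const struct fcp_resp_demo *rsp)
{
	const char *p = (const char *)&rsp[1];	/* data follows header */

	if (rsp->fr_flags & FCP_RSP_LEN_VAL_DEMO)
		p += rsp->fr_rsp_len;		/* skip response info */
	return (rsp->fr_flags & FCP_SNS_LEN_VAL_DEMO) ? p : NULL;
}

int main(void)
{
	unsigned char buf[sizeof(struct fcp_resp_demo) + 8 + 4];
	struct fcp_resp_demo *rsp = (void *)buf;

	memset(buf, 0, sizeof(buf));
	rsp->fr_flags = FCP_RSP_LEN_VAL_DEMO | FCP_SNS_LEN_VAL_DEMO;
	rsp->fr_rsp_len = 8;			/* 8 bytes of rsp info */
	rsp->fr_sns_len = 4;
	memcpy(buf + sizeof(*rsp) + 8, "\x70\x00\x05\x00", 4);

	printf("sense key byte: 0x%02x\n",
	       (unsigned char)sense_ptr(rsp)[2]);	/* prints 0x05 */
	return 0;
}

The key detail, mirrored in the trace code, is that sense data starts right after the header only when no response info precedes it; otherwise it is offset by the response-info length.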
@@ -1067,6 +1057,8 @@ err_out:
*/
void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
{
+ if (!dbf)
+ return;
debug_unregister(dbf->scsi);
debug_unregister(dbf->san);
debug_unregister(dbf->hba);
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 6b1461e8f84..8b7fd9a1033 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -22,6 +22,7 @@
#ifndef ZFCP_DBF_H
#define ZFCP_DBF_H
+#include <scsi/fc/fc_fcp.h>
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#include "zfcp_def.h"
@@ -122,7 +123,6 @@ struct zfcp_dbf_hba_record_response {
} unit;
struct {
u32 d_id;
- u8 ls_code;
} els;
} u;
} __attribute__ ((packed));
@@ -166,6 +166,7 @@ struct zfcp_dbf_san_record_ct_request {
u8 options;
u16 max_res_size;
u32 len;
+ u32 d_id;
} __attribute__ ((packed));
struct zfcp_dbf_san_record_ct_response {
@@ -179,16 +180,13 @@ struct zfcp_dbf_san_record_ct_response {
} __attribute__ ((packed));
struct zfcp_dbf_san_record_els {
- u8 ls_code;
- u32 len;
+ u32 d_id;
} __attribute__ ((packed));
struct zfcp_dbf_san_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u64 fsf_reqid;
u32 fsf_seqno;
- u32 s_id;
- u32 d_id;
union {
struct zfcp_dbf_san_record_ct_request ct_req;
struct zfcp_dbf_san_record_ct_response ct_resp;
@@ -343,7 +341,7 @@ static inline
void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
struct scsi_cmnd *scsi_cmnd)
{
- zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
+ zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7da2fad8f51..e1b5b88e2dd 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -71,131 +71,6 @@
/* timeout value for "default timer" for fsf requests */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
-
-/* task attribute values in FCP-2 FCP_CMND IU */
-#define SIMPLE_Q 0
-#define HEAD_OF_Q 1
-#define ORDERED_Q 2
-#define ACA_Q 4
-#define UNTAGGED 5
-
-/* task management flags in FCP-2 FCP_CMND IU */
-#define FCP_CLEAR_ACA 0x40
-#define FCP_TARGET_RESET 0x20
-#define FCP_LOGICAL_UNIT_RESET 0x10
-#define FCP_CLEAR_TASK_SET 0x04
-#define FCP_ABORT_TASK_SET 0x02
-
-#define FCP_CDB_LENGTH 16
-
-#define ZFCP_DID_MASK 0x00FFFFFF
-
-/* FCP(-2) FCP_CMND IU */
-struct fcp_cmnd_iu {
- u64 fcp_lun; /* FCP logical unit number */
- u8 crn; /* command reference number */
- u8 reserved0:5; /* reserved */
- u8 task_attribute:3; /* task attribute */
- u8 task_management_flags; /* task management flags */
- u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */
- u8 rddata:1; /* read data */
- u8 wddata:1; /* write data */
- u8 fcp_cdb[FCP_CDB_LENGTH];
-} __attribute__((packed));
-
-/* FCP(-2) FCP_RSP IU */
-struct fcp_rsp_iu {
- u8 reserved0[10];
- union {
- struct {
- u8 reserved1:3;
- u8 fcp_conf_req:1;
- u8 fcp_resid_under:1;
- u8 fcp_resid_over:1;
- u8 fcp_sns_len_valid:1;
- u8 fcp_rsp_len_valid:1;
- } bits;
- u8 value;
- } validity;
- u8 scsi_status;
- u32 fcp_resid;
- u32 fcp_sns_len;
- u32 fcp_rsp_len;
-} __attribute__((packed));
-
-
-#define RSP_CODE_GOOD 0
-#define RSP_CODE_LENGTH_MISMATCH 1
-#define RSP_CODE_FIELD_INVALID 2
-#define RSP_CODE_RO_MISMATCH 3
-#define RSP_CODE_TASKMAN_UNSUPP 4
-#define RSP_CODE_TASKMAN_FAILED 5
-
-/* see fc-fs */
-#define LS_RSCN 0x61
-#define LS_LOGO 0x05
-#define LS_PLOGI 0x03
-
-struct fcp_rscn_head {
- u8 command;
- u8 page_length; /* always 0x04 */
- u16 payload_len;
-} __attribute__((packed));
-
-struct fcp_rscn_element {
- u8 reserved:2;
- u8 event_qual:4;
- u8 addr_format:2;
- u32 nport_did:24;
-} __attribute__((packed));
-
-/* see fc-ph */
-struct fcp_logo {
- u32 command;
- u32 nport_did;
- u64 nport_wwpn;
-} __attribute__((packed));
-
-/*
- * FC-FS stuff
- */
-#define R_A_TOV 10 /* seconds */
-
-#define ZFCP_LS_RLS 0x0f
-#define ZFCP_LS_ADISC 0x52
-#define ZFCP_LS_RPS 0x56
-#define ZFCP_LS_RSCN 0x61
-#define ZFCP_LS_RNID 0x78
-
-struct zfcp_ls_adisc {
- u8 code;
- u8 field[3];
- u32 hard_nport_id;
- u64 wwpn;
- u64 wwnn;
- u32 nport_id;
-} __attribute__ ((packed));
-
-/*
- * FC-GS-2 stuff
- */
-#define ZFCP_CT_REVISION 0x01
-#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
-#define ZFCP_CT_NAME_SERVER 0x02
-#define ZFCP_CT_SYNCHRONOUS 0x00
-#define ZFCP_CT_SCSI_FCP 0x08
-#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
-#define ZFCP_CT_GID_PN 0x0121
-#define ZFCP_CT_GPN_FT 0x0172
-#define ZFCP_CT_ACCEPT 0x8002
-#define ZFCP_CT_REJECT 0x8001
-
-/*
- * FC-GS-4 stuff
- */
-#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
-
/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
/*
@@ -205,7 +80,6 @@ struct zfcp_ls_adisc {
#define ZFCP_COMMON_FLAGS 0xfff00000
/* common status bits */
-#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
@@ -222,21 +96,10 @@ struct zfcp_ls_adisc {
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
-/* FC-PH/FC-GS well-known address identifiers for generic services */
-#define ZFCP_DID_WKA 0xFFFFF0
-
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
-/* well known address (WKA) port status*/
-enum zfcp_wka_status {
- ZFCP_WKA_PORT_OFFLINE,
- ZFCP_WKA_PORT_CLOSING,
- ZFCP_WKA_PORT_OPENING,
- ZFCP_WKA_PORT_ONLINE,
-};
-
/* logical unit status */
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
@@ -247,10 +110,7 @@ enum zfcp_wka_status {
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
-#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
-#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
-#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -265,125 +125,10 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_abort;
mempool_t *status_read_req;
mempool_t *status_read_data;
- mempool_t *gid_pn_data;
+ mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
-/*
- * header for CT_IU
- */
-struct ct_hdr {
- u8 revision; // 0x01
- u8 in_id[3]; // 0x00
- u8 gs_type; // 0xFC Directory Service
- u8 gs_subtype; // 0x02 Name Server
- u8 options; // 0x00 single bidirectional exchange
- u8 reserved0;
- u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
- u16 max_res_size; // <= (4096 - 16) / 4
- u8 reserved1;
- u8 reason_code;
- u8 reason_code_expl;
- u8 vendor_unique;
-} __attribute__ ((packed));
-
-/* nameserver request CT_IU -- for requests where
- * a port name is required */
-struct ct_iu_gid_pn_req {
- struct ct_hdr header;
- u64 wwpn;
-} __attribute__ ((packed));
-
-/* FS_ACC IU and data unit for GID_PN nameserver request */
-struct ct_iu_gid_pn_resp {
- struct ct_hdr header;
- u32 d_id;
-} __attribute__ ((packed));
-
-struct ct_iu_gpn_ft_req {
- struct ct_hdr header;
- u8 flags;
- u8 domain_id_scope;
- u8 area_id_scope;
- u8 fc4_type;
-} __attribute__ ((packed));
-
-
-/**
- * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
- * @wka_port: port where the request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_ct {
- struct zfcp_wka_port *wka_port;
- struct scatterlist *req;
- struct scatterlist *resp;
- void (*handler)(unsigned long);
- unsigned long handler_data;
- struct completion *completion;
- int status;
-};
-
-/* used for name server requests in error recovery */
-struct zfcp_gid_pn_data {
- struct zfcp_send_ct ct;
- struct scatterlist req;
- struct scatterlist resp;
- struct ct_iu_gid_pn_req ct_iu_req;
- struct ct_iu_gid_pn_resp ct_iu_resp;
- struct zfcp_port *port;
-};
-
-/**
- * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
- * @adapter: adapter where request is sent from
- * @port: port where ELS is destinated (port reference count has to be increased)
- * @d_id: destiniation id of port where request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @ls_code: hex code of ELS command
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_els {
- struct zfcp_adapter *adapter;
- struct zfcp_port *port;
- u32 d_id;
- struct scatterlist *req;
- struct scatterlist *resp;
- void (*handler)(unsigned long);
- unsigned long handler_data;
- struct completion *completion;
- int ls_code;
- int status;
-};
-
-struct zfcp_wka_port {
- struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
- enum zfcp_wka_status status;
- atomic_t refcount;
- u32 d_id;
- u32 handle;
- struct mutex mutex;
- struct delayed_work work;
-};
-
-struct zfcp_wka_ports {
- struct zfcp_wka_port ms; /* management service */
- struct zfcp_wka_port ts; /* time service */
- struct zfcp_wka_port ds; /* directory service */
- struct zfcp_wka_port as; /* alias service */
- struct zfcp_wka_port ks; /* key distribution service */
-};
-
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first; /* index of next free bfr in queue */
@@ -446,9 +191,7 @@ struct zfcp_qdio {
};
struct zfcp_adapter {
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
+ struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
@@ -461,7 +204,8 @@ struct zfcp_adapter {
u32 hardware_version; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
- struct list_head port_list_head; /* remote port list */
+ struct list_head port_list; /* remote port list */
+ rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
@@ -485,7 +229,7 @@ struct zfcp_adapter {
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct task_struct *erp_thread;
- struct zfcp_wka_ports *gs; /* generic services */
+ struct zfcp_fc_wka_ports *gs; /* generic services */
struct zfcp_dbf *dbf; /* debug traces */
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct fc_host_statistics *fc_stats;
@@ -500,11 +244,9 @@ struct zfcp_port {
struct device sysfs_device; /* sysfs device */
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
struct zfcp_adapter *adapter; /* adapter used to access port */
- struct list_head unit_list_head; /* head of logical unit list */
+ struct list_head unit_list; /* head of logical unit list */
+ rwlock_t unit_list_lock; /* unit list lock */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
@@ -523,9 +265,6 @@ struct zfcp_port {
struct zfcp_unit {
struct device sysfs_device; /* sysfs device */
struct list_head list; /* list of logical units */
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
u64 fcp_lun; /* own FCP_LUN */
@@ -601,14 +340,11 @@ struct zfcp_fsf_req {
struct zfcp_data {
struct scsi_host_template scsi_host_template;
struct scsi_transport_template *scsi_transport_template;
- rwlock_t config_lock; /* serialises changes
- to adapter/port/unit
- lists */
- struct mutex config_mutex;
struct kmem_cache *gpn_ft_cache;
struct kmem_cache *qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
+ struct kmem_cache *adisc_cache;
};
/********************** ZFCP SPECIFIC DEFINES ********************************/
@@ -657,47 +393,4 @@ zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
return NULL;
}
-/*
- * functions needed for reference/usage counting
- */
-
-static inline void
-zfcp_unit_get(struct zfcp_unit *unit)
-{
- atomic_inc(&unit->refcount);
-}
-
-static inline void
-zfcp_unit_put(struct zfcp_unit *unit)
-{
- if (atomic_dec_return(&unit->refcount) == 0)
- wake_up(&unit->remove_wq);
-}
-
-static inline void
-zfcp_port_get(struct zfcp_port *port)
-{
- atomic_inc(&port->refcount);
-}
-
-static inline void
-zfcp_port_put(struct zfcp_port *port)
-{
- if (atomic_dec_return(&port->refcount) == 0)
- wake_up(&port->remove_wq);
-}
-
-static inline void
-zfcp_adapter_get(struct zfcp_adapter *adapter)
-{
- atomic_inc(&adapter->refcount);
-}
-
-static inline void
-zfcp_adapter_put(struct zfcp_adapter *adapter)
-{
- if (atomic_dec_return(&adapter->refcount) == 0)
- wake_up(&adapter->remove_wq);
-}
-
#endif /* ZFCP_DEF_H */
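
The removed helpers above implement the old lifetime model: a bare atomic counter plus a waitqueue, where the dequeue path blocks until every user has dropped its reference. A userspace sketch of that scheme (pthreads condition variable in place of the kernel waitqueue, illustrative names), useful for contrasting with the kref model that replaces it:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t remove_wq = PTHREAD_COND_INITIALIZER;
static int refcount;

static void obj_get(void)
{
	pthread_mutex_lock(&lock);
	refcount++;
	pthread_mutex_unlock(&lock);
}

static void obj_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0)
		pthread_cond_signal(&remove_wq);  /* wake_up(&remove_wq) */
	pthread_mutex_unlock(&lock);
}

static void obj_dequeue(void)
{
	pthread_mutex_lock(&lock);
	while (refcount != 0)	/* wait_event(remove_wq, refcount == 0) */
		pthread_cond_wait(&remove_wq, &lock);
	pthread_mutex_unlock(&lock);
	printf("all references dropped, safe to free\n");
}

int main(void)
{
	obj_get();
	obj_put();
	obj_dequeue();		/* returns immediately: count is zero */
	return 0;
}

The weakness of this model is the dedicated remover that must block; with kref and device refcounting, the last put performs the cleanup itself, so no remove_wq is needed.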
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index f73e2180f33..b51a11a82e6 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -99,9 +99,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- list_for_each_entry(unit, &port->unit_list_head, list)
- zfcp_erp_action_dismiss_unit(unit);
+ else {
+ read_lock(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list)
+ zfcp_erp_action_dismiss_unit(unit);
+ read_unlock(&port->unit_list_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,9 +113,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&adapter->erp_action);
- else
- list_for_each_entry(port, &adapter->port_list_head, list)
+ else {
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_erp_action_dismiss_port(port);
+ read_unlock(&adapter->port_list_lock);
+ }
}
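
Both dismiss helpers now take the list lock as readers, since they only traverse: multiple readers may hold the lock concurrently, while the enqueue and remove paths take it as writers. A userspace sketch with pthread rwlocks standing in for the kernel rwlock_t (names illustrative):

#include <pthread.h>
#include <stdio.h>

#define NPORTS 3

static pthread_rwlock_t port_list_lock = PTHREAD_RWLOCK_INITIALIZER;
static int port_status[NPORTS];

static void dismiss_all_ports(void)
{
	pthread_rwlock_rdlock(&port_list_lock);	/* read_lock() */
	for (int i = 0; i < NPORTS; i++)	/* list_for_each_entry() */
		printf("dismissing port %d (status %d)\n",
		       i, port_status[i]);
	pthread_rwlock_unlock(&port_list_lock);
}

static void add_port(int i, int status)
{
	pthread_rwlock_wrlock(&port_list_lock);	/* write_lock_irq() */
	port_status[i] = status;
	pthread_rwlock_unlock(&port_list_lock);
}

int main(void)
{
	for (int i = 0; i < NPORTS; i++)
		add_port(i, 1);
	dismiss_all_ports();
	return 0;
}

One caveat carried over from the kernel code: a kernel read lock spins, so nothing that may sleep can run inside the critical section; sleeping work still has to move outside, as the remove path does.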
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
@@ -168,7 +174,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
- zfcp_unit_get(unit);
+ if (!get_device(&unit->sysfs_device))
+ return NULL;
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
erp_action = &unit->erp_action;
if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
@@ -177,7 +184,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- zfcp_port_get(port);
+ if (!get_device(&port->sysfs_device))
+ return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
@@ -186,7 +194,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- zfcp_adapter_get(adapter);
+ kref_get(&adapter->ref);
zfcp_erp_action_dismiss_adapter(adapter);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
@@ -264,11 +272,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
{
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
- _zfcp_erp_adapter_reopen(adapter, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ zfcp_erp_adapter_block(adapter, clear);
+ zfcp_scsi_schedule_rports_block(adapter);
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+ else
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+ NULL, NULL, id, ref);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
@@ -345,11 +358,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_port_forced_reopen(port, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
@@ -377,15 +388,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
*/
int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
{
- unsigned long flags;
int retval;
+ unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ write_lock_irqsave(&adapter->erp_lock, flags);
retval = _zfcp_erp_port_reopen(port, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
return retval;
}
@@ -424,11 +433,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
struct zfcp_port *port = unit->port;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_unit_reopen(unit, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int status_change_set(unsigned long mask, atomic_t *status)
@@ -540,8 +547,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
{
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
_zfcp_erp_port_reopen(port, clear, id, ref);
+ read_unlock(&adapter->port_list_lock);
}
static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
@@ -549,8 +558,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
{
struct zfcp_unit *unit;
- list_for_each_entry(unit, &port->unit_list_head, list)
+ read_lock(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list)
_zfcp_erp_unit_reopen(unit, clear, id, ref);
+ read_unlock(&port->unit_list_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -590,16 +601,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- read_lock(&adapter->erp_lock);
+ read_lock_irqsave(&adapter->erp_lock, flags);
if (list_empty(&adapter->erp_ready_head) &&
list_empty(&adapter->erp_running_head)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status);
wake_up(&adapter->erp_done_wqh);
}
- read_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
@@ -1170,28 +1179,28 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
- zfcp_unit_get(unit);
+ get_device(&unit->sysfs_device);
if (scsi_queue_work(unit->port->adapter->scsi_host,
&unit->scsi_work) <= 0)
- zfcp_unit_put(unit);
+ put_device(&unit->sysfs_device);
}
- zfcp_unit_put(unit);
+ put_device(&unit->sysfs_device);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
- schedule_work(&adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->scan_work);
} else
unregister_service_level(&adapter->service_level);
- zfcp_adapter_put(adapter);
+ kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
}
@@ -1214,12 +1223,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
- struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
+ struct zfcp_adapter *adapter = erp_action->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ kref_get(&adapter->ref);
+ write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_strategy_check_fsfreq(erp_action);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1231,11 +1240,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
zfcp_erp_action_to_running(erp_action);
/* no lock to allow for blocking operations */
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
retval = zfcp_erp_strategy_do_action(erp_action);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ write_lock_irqsave(&adapter->erp_lock, flags);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
retval = ZFCP_ERP_CONTINUES;
@@ -1273,12 +1280,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
zfcp_erp_strategy_followup_failed(erp_action);
unlock:
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
if (retval != ZFCP_ERP_CONTINUES)
zfcp_erp_action_cleanup(erp_action, retval);
+ kref_put(&adapter->ref, zfcp_adapter_release);
return retval;
}
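
zfcp_erp_strategy() shows the companion pattern: pin the adapter with kref_get(), drop erp_lock around the blocking strategy step, retake it, and re-check state that may have changed in between (ZFCP_STATUS_ERP_DISMISSED). A compact userspace sketch of that unlock window, with a pthread mutex for the lock and illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t erp_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 1;
static int dismissed;

static int strategy_do_action(void)
{
	/* stands in for a blocking FSF request / recovery step */
	return 0;
}

static int erp_strategy(void)
{
	int retval;

	refcount++;			/* kref_get(&adapter->ref) */
	pthread_mutex_lock(&erp_lock);
	/* ... move the action to the running list ... */
	pthread_mutex_unlock(&erp_lock);	/* allow blocking ops */

	retval = strategy_do_action();

	pthread_mutex_lock(&erp_lock);	/* retake before re-checking */
	if (dismissed)			/* state may have changed! */
		retval = 1;
	pthread_mutex_unlock(&erp_lock);

	refcount--;			/* kref_put(..., release) */
	return retval;
}

int main(void)
{
	printf("strategy returned %d\n", erp_strategy());
	return 0;
}

The reference taken up front is what makes the unlocked window safe: even if the adapter is being torn down concurrently, the memory stays alive until the final put after the lock is released.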
@@ -1415,6 +1422,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
void *ref, u32 mask, int set_or_clear)
{
struct zfcp_port *port;
+ unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
if (set_or_clear == ZFCP_SET) {
@@ -1429,10 +1437,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
atomic_set(&adapter->erp_counter, 0);
}
- if (common_mask)
- list_for_each_entry(port, &adapter->port_list_head, list)
+ if (common_mask) {
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_erp_modify_port_status(port, id, ref, common_mask,
set_or_clear);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
}
/**
@@ -1449,6 +1460,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
u32 mask, int set_or_clear)
{
struct zfcp_unit *unit;
+ unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
if (set_or_clear == ZFCP_SET) {
@@ -1463,10 +1475,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
atomic_set(&port->erp_counter, 0);
}
- if (common_mask)
- list_for_each_entry(unit, &port->unit_list_head, list)
+ if (common_mask) {
+ read_lock_irqsave(&port->unit_list_lock, flags);
+ list_for_each_entry(unit, &port->unit_list, list)
zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
set_or_clear);
+ read_unlock_irqrestore(&port->unit_list_lock, flags);
+ }
}
/**
@@ -1502,12 +1517,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
*/
void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
{
- unsigned long flags;
-
- read_lock_irqsave(&zfcp_data.config_lock, flags);
zfcp_erp_modify_port_status(port, id, ref,
ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
}
@@ -1535,13 +1546,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
*/
void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
{
- unsigned long flags;
-
- read_lock_irqsave(&zfcp_data.config_lock, flags);
zfcp_erp_modify_port_status(port, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
/**
@@ -1574,12 +1581,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
void *ref)
{
struct zfcp_unit *unit;
+ unsigned long flags;
int status = atomic_read(&port->status);
if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
- list_for_each_entry(unit, &port->unit_list_head, list)
+ read_lock_irqsave(&port->unit_list_lock, flags);
+ list_for_each_entry(unit, &port->unit_list, list)
zfcp_erp_unit_access_changed(unit, id, ref);
+ read_unlock_irqrestore(&port->unit_list_lock, flags);
return;
}
@@ -1595,14 +1605,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
void *ref)
{
- struct zfcp_port *port;
unsigned long flags;
+ struct zfcp_port *port;
if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
return;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_erp_port_access_changed(port, id, ref);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b3f28deb450..03dec832b46 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -9,26 +9,31 @@
#ifndef ZFCP_EXT_H
#define ZFCP_EXT_H
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
#include "zfcp_def.h"
+#include "zfcp_fc.h"
/* zfcp_aux.c */
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
-extern int zfcp_adapter_enqueue(struct ccw_device *);
-extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
-extern void zfcp_port_dequeue(struct zfcp_port *);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern void zfcp_unit_dequeue(struct zfcp_unit *);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
+extern void zfcp_device_unregister(struct device *,
+ const struct attribute_group *);
+extern void zfcp_adapter_release(struct kref *);
+extern void zfcp_adapter_unregister(struct zfcp_adapter *);
/* zfcp_ccw.c */
-extern int zfcp_ccw_register(void);
extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
extern struct ccw_driver zfcp_ccw_driver;
+extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
+extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
/* zfcp_cfdc.c */
extern struct miscdevice zfcp_cfdc_misc;
@@ -51,7 +56,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
struct fsf_status_read_buffer *);
extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
@@ -92,24 +97,22 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
-extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
-extern void _zfcp_fc_scan_ports_later(struct work_struct *);
+extern void zfcp_fc_scan_ports(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fc_port_did_lookup(struct work_struct *);
extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
-extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
extern void zfcp_fc_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
-extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
-extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
-extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
-extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -125,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
-extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *);
-extern int zfcp_fsf_send_els(struct zfcp_send_els *);
+extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
+ mempool_t *);
+extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
+ struct zfcp_fsf_ct_els *);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -153,7 +158,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
extern struct zfcp_data zfcp_data;
extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
-extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index df23bcead23..ac5e3b7a357 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -9,73 +9,38 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
-enum rscn_address_format {
- RSCN_PORT_ADDRESS = 0x0,
- RSCN_AREA_ADDRESS = 0x1,
- RSCN_DOMAIN_ADDRESS = 0x2,
- RSCN_FABRIC_ADDRESS = 0x3,
+static u32 zfcp_fc_rscn_range_mask[] = {
+ [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
+ [ELS_ADDR_FMT_AREA] = 0xFFFF00,
+ [ELS_ADDR_FMT_DOM] = 0xFF0000,
+ [ELS_ADDR_FMT_FAB] = 0x000000,
};
-static u32 rscn_range_mask[] = {
- [RSCN_PORT_ADDRESS] = 0xFFFFFF,
- [RSCN_AREA_ADDRESS] = 0xFFFF00,
- [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
- [RSCN_FABRIC_ADDRESS] = 0x000000,
-};
-
-struct gpn_ft_resp_acc {
- u8 control;
- u8 port_id[3];
- u8 reserved[4];
- u64 wwpn;
-} __attribute__ ((packed));
-
-#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
- / sizeof(struct gpn_ft_resp_acc))
-#define ZFCP_GPN_FT_BUFFERS 4
-#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
- - sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
-
-struct ct_iu_gpn_ft_resp {
- struct ct_hdr header;
- struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
-} __attribute__ ((packed));
-
-struct zfcp_gpn_ft {
- struct zfcp_send_ct ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
-};
-
-struct zfcp_fc_ns_handler_data {
- struct completion done;
- void (*handler)(unsigned long);
- unsigned long handler_data;
-};
-
-static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
+static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
- if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
- wka_port->status == ZFCP_WKA_PORT_CLOSING) {
- wka_port->status = ZFCP_WKA_PORT_OPENING;
+ if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
+ wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
+ wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port))
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
}
mutex_unlock(&wka_port->mutex);
wait_event(wka_port->completion_wq,
- wka_port->status == ZFCP_WKA_PORT_ONLINE ||
- wka_port->status == ZFCP_WKA_PORT_OFFLINE);
+ wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
- if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
+ if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
return 0;
}
@@ -85,24 +50,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
- struct zfcp_wka_port *wka_port =
- container_of(dw, struct zfcp_wka_port, work);
+ struct zfcp_fc_wka_port *wka_port =
+ container_of(dw, struct zfcp_fc_wka_port, work);
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
- (wka_port->status != ZFCP_WKA_PORT_ONLINE))
+ (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
goto out;
- wka_port->status = ZFCP_WKA_PORT_CLOSING;
+ wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
out:
mutex_unlock(&wka_port->mutex);
}
-static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
+static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
@@ -110,7 +75,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
schedule_delayed_work(&wka_port->work, HZ / 100);
}
-static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
+static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
init_waitqueue_head(&wka_port->completion_wq);
@@ -118,107 +83,107 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
wka_port->adapter = adapter;
wka_port->d_id = d_id;
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
-static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
+static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
- wka->status = ZFCP_WKA_PORT_OFFLINE;
+ wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
mutex_unlock(&wka->mutex);
}
-void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs)
+void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
+ if (!gs)
+ return;
zfcp_fc_wka_port_force_offline(&gs->ms);
zfcp_fc_wka_port_force_offline(&gs->ts);
zfcp_fc_wka_port_force_offline(&gs->ds);
zfcp_fc_wka_port_force_offline(&gs->as);
- zfcp_fc_wka_port_force_offline(&gs->ks);
}
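
zfcp_fc_wka_port_get() opens the well-known-address port on first use and blocks until it is ONLINE (a reference is taken) or OFFLINE (the open failed); zfcp_fc_wka_port_put() drops the reference and, on the last put, arms the delayed close above. A hedged usage sketch; send_ct_and_wait() is hypothetical:

	static int query_directory_service(struct zfcp_adapter *adapter)
	{
		int ret;

		ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
		if (ret)
			return ret;	/* interrupted, or port stayed offline */
		ret = send_ct_and_wait(adapter);	/* hypothetical request */
		zfcp_fc_wka_port_put(&adapter->gs->ds);	/* last put arms delayed close */
		return ret;
	}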
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
- struct fcp_rscn_element *elem)
+ struct fc_els_rscn_page *page)
{
unsigned long flags;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_port *port;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
- if ((port->d_id & range) == (elem->nport_did & range))
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
if (!port->d_id)
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1", NULL);
}
-
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
- struct fcp_rscn_head *fcp_rscn_head;
- struct fcp_rscn_element *fcp_rscn_element;
+ struct fc_els_rscn *head;
+ struct fc_els_rscn_page *page;
u16 i;
u16 no_entries;
- u32 range_mask;
+ unsigned int afmt;
- fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
- fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
+ head = (struct fc_els_rscn *) status_buffer->payload.data;
+ page = (struct fc_els_rscn_page *) head;
/* see FC-FS */
- no_entries = fcp_rscn_head->payload_len /
- sizeof(struct fcp_rscn_element);
+ no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
- fcp_rscn_element++;
- range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
- _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
+ page++;
+ afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
+ _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
+ page);
}
- schedule_work(&fsf_req->adapter->scan_work);
+ queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
+ unsigned long flags;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
- unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->wwpn == wwpn)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
+ if (port->wwpn == wwpn) {
+ zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
break;
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
- if (port && (port->wwpn == wwpn))
- zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
- struct fsf_status_read_buffer *status_buffer =
- (struct fsf_status_read_buffer *)req->data;
- struct fsf_plogi *els_plogi =
- (struct fsf_plogi *) status_buffer->payload.data;
+ struct fsf_status_read_buffer *status_buffer;
+ struct fc_els_flogi *plogi;
- zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
+ status_buffer = (struct fsf_status_read_buffer *) req->data;
+ plogi = (struct fc_els_flogi *) status_buffer->payload.data;
+ zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
- struct fcp_logo *els_logo =
- (struct fcp_logo *) status_buffer->payload.data;
+ struct fc_els_logo *logo =
+ (struct fc_els_logo *) status_buffer->payload.data;
- zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
+ zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}
/**
@@ -232,79 +197,72 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
unsigned int els_type = status_buffer->payload.data[0];
zfcp_dbf_san_incoming_els(fsf_req);
- if (els_type == LS_PLOGI)
+ if (els_type == ELS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
- else if (els_type == LS_LOGO)
+ else if (els_type == ELS_LOGO)
zfcp_fc_incoming_logo(fsf_req);
- else if (els_type == LS_RSCN)
+ else if (els_type == ELS_RSCN)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_handler(unsigned long data)
+static void zfcp_fc_ns_gid_pn_eval(void *data)
{
- struct zfcp_fc_ns_handler_data *compl_rec =
- (struct zfcp_fc_ns_handler_data *) data;
-
- if (compl_rec->handler)
- compl_rec->handler(compl_rec->handler_data);
-
- complete(&compl_rec->done);
-}
-
-static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
-{
- struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
- struct zfcp_send_ct *ct = &gid_pn->ct;
- struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
- struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
+ struct zfcp_fc_gid_pn *gid_pn = data;
+ struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
+ struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
+ struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
struct zfcp_port *port = gid_pn->port;
if (ct->status)
return;
- if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT)
+ if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
/* paranoia */
- if (ct_iu_req->wwpn != port->wwpn)
+ if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
return;
/* looks like a valid d_id */
- port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
+ port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+}
+
+static void zfcp_fc_complete(void *data)
+{
+ complete(data);
}
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
- struct zfcp_gid_pn_data *gid_pn)
+ struct zfcp_fc_gid_pn *gid_pn)
{
struct zfcp_adapter *adapter = port->adapter;
- struct zfcp_fc_ns_handler_data compl_rec;
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
/* setup parameters for send generic command */
gid_pn->port = port;
- gid_pn->ct.wka_port = &adapter->gs->ds;
- gid_pn->ct.handler = zfcp_fc_ns_handler;
- gid_pn->ct.handler_data = (unsigned long) &compl_rec;
- gid_pn->ct.req = &gid_pn->req;
- gid_pn->ct.resp = &gid_pn->resp;
- sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
- sizeof(struct ct_iu_gid_pn_req));
- sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
- sizeof(struct ct_iu_gid_pn_resp));
+ gid_pn->ct.handler = zfcp_fc_complete;
+ gid_pn->ct.handler_data = &completion;
+ gid_pn->ct.req = &gid_pn->sg_req;
+ gid_pn->ct.resp = &gid_pn->sg_resp;
+ sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
+ sizeof(struct zfcp_fc_gid_pn_req));
+ sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
+ sizeof(struct zfcp_fc_gid_pn_resp));
/* setup nameserver request */
- gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
- gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
- gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
- gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
- gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
- gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
- gid_pn->ct_iu_req.wwpn = port->wwpn;
-
- init_completion(&compl_rec.done);
- compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
- compl_rec.handler_data = (unsigned long) gid_pn;
- ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req);
- if (!ret)
- wait_for_completion(&compl_rec.done);
+ gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
+ gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
+ gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
+ gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
+ gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
+ gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
+ gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+ adapter->pool.gid_pn_req);
+ if (!ret) {
+ wait_for_completion(&completion);
+ zfcp_fc_ns_gid_pn_eval(gid_pn);
+ }
return ret;
}
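
The private handler indirection is gone: zfcp_fc_complete() simply completes a caller-provided struct completion, so the response is evaluated synchronously in the caller once wait_for_completion() returns. The idiom, sketched with hypothetical issue_request() and evaluate_response():

	DECLARE_COMPLETION_ONSTACK(done);

	ct->handler = zfcp_fc_complete;		/* just runs complete(&done) */
	ct->handler_data = &done;
	if (!issue_request(ct)) {		/* stand-in for zfcp_fsf_send_ct() */
		wait_for_completion(&done);	/* response buffers now valid */
		evaluate_response(ct);		/* parse in process context */
	}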
@@ -316,10 +274,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_gid_pn_data *gid_pn;
+ struct zfcp_fc_gid_pn *gid_pn;
struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
+ gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
if (!gid_pn)
return -ENOMEM;
@@ -333,7 +291,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.gid_pn_data);
+ mempool_free(gid_pn, adapter->pool.gid_pn);
return ret;
}
@@ -357,7 +315,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
/**
@@ -366,9 +324,9 @@ out:
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
/**
@@ -378,33 +336,36 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
*
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
*/
-void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
-{
- port->maxframe_size = plogi->serv_param.common_serv_param[7] |
- ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
- if (plogi->serv_param.class1_serv_param[0] & 0x80)
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
+{
+ if (plogi->fl_wwpn != port->wwpn) {
+ port->d_id = 0;
+ dev_warn(&port->adapter->ccw_device->dev,
+ "A port opened with WWPN 0x%016Lx returned data that "
+ "identifies it as WWPN 0x%016Lx\n",
+ (unsigned long long) port->wwpn,
+ (unsigned long long) plogi->fl_wwpn);
+ return;
+ }
+
+ port->wwnn = plogi->fl_wwnn;
+ port->maxframe_size = plogi->fl_csp.sp_bb_data;
+
+ if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS1;
- if (plogi->serv_param.class2_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS2;
- if (plogi->serv_param.class3_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS3;
- if (plogi->serv_param.class4_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS4;
}
-struct zfcp_els_adisc {
- struct zfcp_send_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct zfcp_ls_adisc ls_adisc;
- struct zfcp_ls_adisc ls_adisc_acc;
-};
-
-static void zfcp_fc_adisc_handler(unsigned long data)
+static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
+ struct zfcp_fc_els_adisc *adisc = data;
struct zfcp_port *port = adisc->els.port;
- struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
+ struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
if (adisc->els.status) {
/* request rejected or timed out */
@@ -414,9 +375,9 @@ static void zfcp_fc_adisc_handler(unsigned long data)
}
if (!port->wwnn)
- port->wwnn = ls_adisc->wwnn;
+ port->wwnn = adisc_resp->adisc_wwnn;
- if ((port->wwpn != ls_adisc->wwpn) ||
+ if ((port->wwpn != adisc_resp->adisc_wwpn) ||
!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_2", NULL);
@@ -427,40 +388,44 @@ static void zfcp_fc_adisc_handler(unsigned long data)
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
- zfcp_port_put(port);
- kfree(adisc);
+ put_device(&port->sysfs_device);
+ kmem_cache_free(zfcp_data.adisc_cache, adisc);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_els_adisc *adisc;
+ struct zfcp_fc_els_adisc *adisc;
struct zfcp_adapter *adapter = port->adapter;
+ int ret;
- adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
+ adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
+ adisc->els.port = port;
adisc->els.req = &adisc->req;
adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->ls_adisc,
- sizeof(struct zfcp_ls_adisc));
- sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
- sizeof(struct zfcp_ls_adisc));
+ sg_init_one(adisc->els.req, &adisc->adisc_req,
+ sizeof(struct fc_els_adisc));
+ sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+ sizeof(struct fc_els_adisc));
- adisc->els.adapter = adapter;
- adisc->els.port = port;
- adisc->els.d_id = port->d_id;
adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = (unsigned long) adisc;
- adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
+ adisc->els.handler_data = adisc;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
+ adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
+ adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
+ adisc->adisc_req.adisc_cmd = ELS_ADISC;
+ hton24(adisc->adisc_req.adisc_port_id,
+ fc_host_port_id(adapter->scsi_host));
+
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els);
+ if (ret)
+ kmem_cache_free(zfcp_data.adisc_cache, adisc);
- return zfcp_fsf_send_els(&adisc->els);
+ return ret;
}
void zfcp_fc_link_test_work(struct work_struct *work)
@@ -469,7 +434,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@@ -488,7 +453,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
out:
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
/**
@@ -501,12 +466,12 @@ out:
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
-static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
+static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
{
struct scatterlist *sg = &gpn_ft->sg_req;
@@ -516,10 +481,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
kfree(gpn_ft);
}
-static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
+static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
{
- struct zfcp_gpn_ft *gpn_ft;
- struct ct_iu_gpn_ft_req *req;
+ struct zfcp_fc_gpn_ft *gpn_ft;
+ struct zfcp_fc_gpn_ft_req *req;
gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
if (!gpn_ft)
@@ -542,159 +507,152 @@ out:
}
-static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_send_ct *ct = &gpn_ft->ct;
- struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
- struct zfcp_fc_ns_handler_data compl_rec;
+ struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
+ struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
/* prepare CT IU for GPN_FT */
- req->header.revision = ZFCP_CT_REVISION;
- req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
- req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
- req->header.options = ZFCP_CT_SYNCHRONOUS;
- req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
- req->header.max_res_size = max_bytes / 4;
- req->flags = 0;
- req->domain_id_scope = 0;
- req->area_id_scope = 0;
- req->fc4_type = ZFCP_CT_SCSI_FCP;
+ req->ct_hdr.ct_rev = FC_CT_REV;
+ req->ct_hdr.ct_fs_type = FC_FST_DIR;
+ req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
+ req->ct_hdr.ct_options = 0;
+ req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
+ req->ct_hdr.ct_mr_size = max_bytes / 4;
+ req->gpn_ft.fn_domain_id_scope = 0;
+ req->gpn_ft.fn_area_id_scope = 0;
+ req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
/* prepare zfcp_send_ct */
- ct->wka_port = &adapter->gs->ds;
- ct->handler = zfcp_fc_ns_handler;
- ct->handler_data = (unsigned long)&compl_rec;
+ ct->handler = zfcp_fc_complete;
+ ct->handler_data = &completion;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
- init_completion(&compl_rec.done);
- compl_rec.handler = NULL;
- ret = zfcp_fsf_send_ct(ct, NULL);
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL);
if (!ret)
- wait_for_completion(&compl_rec.done);
+ wait_for_completion(&completion);
return ret;
}
-static void zfcp_fc_validate_port(struct zfcp_port *port)
+static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
- struct zfcp_adapter *adapter = port->adapter;
-
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
- !list_empty(&port->unit_list_head)) {
- zfcp_port_put(port);
+ !list_empty(&port->unit_list))
return;
- }
- zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL);
- zfcp_erp_wait(adapter);
- zfcp_port_put(port);
- zfcp_port_dequeue(port);
+
+ list_move_tail(&port->list, lh);
}
-static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+ struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_send_ct *ct = &gpn_ft->ct;
+ struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
struct scatterlist *sg = gpn_ft->sg_resp;
- struct ct_hdr *hdr = sg_virt(sg);
- struct gpn_ft_resp_acc *acc = sg_virt(sg);
- struct zfcp_adapter *adapter = ct->wka_port->adapter;
+ struct fc_ct_hdr *hdr = sg_virt(sg);
+ struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
+ unsigned long flags;
+ LIST_HEAD(remove_lh);
u32 d_id;
int ret = 0, x, last = 0;
if (ct->status)
return -EIO;
- if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
- if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
+ if (hdr->ct_cmd != FC_FS_ACC) {
+ if (hdr->ct_reason == FC_BA_RJT_UNABLE)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
- if (hdr->max_res_size) {
+ if (hdr->ct_mr_size) {
dev_warn(&adapter->ccw_device->dev,
"The name server reported %d words residual data\n",
- hdr->max_res_size);
+ hdr->ct_mr_size);
return -E2BIG;
}
- mutex_lock(&zfcp_data.config_mutex);
-
/* first entry is the header */
for (x = 1; x < max_entries && !last; x++) {
- if (x % (ZFCP_GPN_FT_ENTRIES + 1))
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
acc++;
else
acc = sg_virt(++sg);
- last = acc->control & 0x80;
- d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
- acc->port_id[2];
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ d_id = ntoh24(acc->fp_fid);
/* don't attach ports with a well known address */
- if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
+ if (d_id >= FC_FID_WELL_KNOWN_BASE)
continue;
/* skip the adapter's port and known remote ports */
- if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
- continue;
- port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
- if (port)
+ if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
continue;
- port = zfcp_port_enqueue(adapter, acc->wwpn,
+ port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
ZFCP_STATUS_COMMON_NOESC, d_id);
- if (IS_ERR(port))
- ret = PTR_ERR(port);
- else
+ if (!IS_ERR(port))
zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
+ else if (PTR_ERR(port) != -EEXIST)
+ ret = PTR_ERR(port);
}
zfcp_erp_wait(adapter);
- list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
- zfcp_fc_validate_port(port);
- mutex_unlock(&zfcp_data.config_mutex);
+ write_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
+ zfcp_fc_validate_port(port, &remove_lh);
+ write_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+ list_for_each_entry_safe(port, tmp, &remove_lh, list) {
+ zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
+ zfcp_device_unregister(&port->sysfs_device,
+ &zfcp_sysfs_port_attrs);
+ }
+
return ret;
}
/**
* zfcp_fc_scan_ports - scan remote ports and attach new ports
- * @adapter: pointer to struct zfcp_adapter
+ * @work: reference to scheduled work
*/
-int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
+void zfcp_fc_scan_ports(struct work_struct *work)
{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ scan_work);
int ret, i;
- struct zfcp_gpn_ft *gpn_ft;
+ struct zfcp_fc_gpn_ft *gpn_ft;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
- buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
- max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
- max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
+ buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
+ max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
+ max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
- return 0;
+ return;
- ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
- if (ret)
- return ret;
+ if (zfcp_fc_wka_port_get(&adapter->gs->ds))
+ return;
gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft) {
- ret = -ENOMEM;
+ if (!gpn_ft)
goto out;
- }
for (i = 0; i < 3; i++) {
ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
if (!ret) {
- ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
@@ -704,174 +662,116 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
zfcp_free_sg_env(gpn_ft, buf_num);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
- return ret;
-}
-
-
-void _zfcp_fc_scan_ports_later(struct work_struct *work)
-{
- zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
}
-struct zfcp_els_fc_job {
- struct zfcp_send_els els;
- struct fc_bsg_job *job;
-};
-
-static void zfcp_fc_generic_els_handler(unsigned long data)
+static void zfcp_fc_ct_els_job_handler(void *data)
{
- struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
- struct fc_bsg_job *job = els_fc_job->job;
- struct fc_bsg_reply *reply = job->reply;
-
- if (els_fc_job->els.status) {
- /* request rejected or timed out */
- reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
- goto out;
- }
-
- reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
- reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ struct fc_bsg_job *job = data;
+ struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
+ int status = zfcp_ct_els->status;
+ int reply_status;
-out:
- job->state_flags = FC_RQST_STATE_DONE;
+ reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
+ job->reply->reply_data.ctels_reply.status = reply_status;
+ job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
job->job_done(job);
- kfree(els_fc_job);
}
-int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
+static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+ struct zfcp_adapter *adapter)
{
- struct zfcp_els_fc_job *els_fc_job;
+ struct zfcp_fsf_ct_els *els = job->dd_data;
struct fc_rport *rport = job->rport;
- struct Scsi_Host *shost;
- struct zfcp_adapter *adapter;
struct zfcp_port *port;
- u8 *port_did;
-
- shost = rport ? rport_to_shost(rport) : job->shost;
- adapter = (struct zfcp_adapter *)shost->hostdata[0];
-
- if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
- return -EINVAL;
-
- els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
- if (!els_fc_job)
- return -ENOMEM;
+ u32 d_id;
- els_fc_job->els.adapter = adapter;
if (rport) {
- read_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
- if (port)
- els_fc_job->els.d_id = port->d_id;
- read_unlock_irq(&zfcp_data.config_lock);
- if (!port) {
- kfree(els_fc_job);
+ if (!port)
return -EINVAL;
- }
- } else {
- port_did = job->request->rqst_data.h_els.port_id;
- els_fc_job->els.d_id = (port_did[0] << 16) +
- (port_did[1] << 8) + port_did[2];
- }
-
- els_fc_job->els.req = job->request_payload.sg_list;
- els_fc_job->els.resp = job->reply_payload.sg_list;
- els_fc_job->els.handler = zfcp_fc_generic_els_handler;
- els_fc_job->els.handler_data = (unsigned long) els_fc_job;
- els_fc_job->job = job;
-
- return zfcp_fsf_send_els(&els_fc_job->els);
-}
-
-struct zfcp_ct_fc_job {
- struct zfcp_send_ct ct;
- struct fc_bsg_job *job;
-};
-
-static void zfcp_fc_generic_ct_handler(unsigned long data)
-{
- struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
- struct fc_bsg_job *job = ct_fc_job->job;
-
- job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
- FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
- job->state_flags = FC_RQST_STATE_DONE;
- job->job_done(job);
- zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
+ d_id = port->d_id;
+ put_device(&port->sysfs_device);
+ } else
+ d_id = ntoh24(job->request->rqst_data.h_els.port_id);
- kfree(ct_fc_job);
+ return zfcp_fsf_send_els(adapter, d_id, els);
}
-int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
+static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+ struct zfcp_adapter *adapter)
{
int ret;
u8 gs_type;
- struct fc_rport *rport = job->rport;
- struct Scsi_Host *shost;
- struct zfcp_adapter *adapter;
- struct zfcp_ct_fc_job *ct_fc_job;
+ struct zfcp_fsf_ct_els *ct = job->dd_data;
+ struct zfcp_fc_wka_port *wka_port;
u32 preamble_word1;
- shost = rport ? rport_to_shost(rport) : job->shost;
-
- adapter = (struct zfcp_adapter *)shost->hostdata[0];
- if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
- return -EINVAL;
-
- ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
- if (!ct_fc_job)
- return -ENOMEM;
-
preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
switch (gs_type) {
case FC_FST_ALIAS:
- ct_fc_job->ct.wka_port = &adapter->gs->as;
+ wka_port = &adapter->gs->as;
break;
case FC_FST_MGMT:
- ct_fc_job->ct.wka_port = &adapter->gs->ms;
+ wka_port = &adapter->gs->ms;
break;
case FC_FST_TIME:
- ct_fc_job->ct.wka_port = &adapter->gs->ts;
+ wka_port = &adapter->gs->ts;
break;
case FC_FST_DIR:
- ct_fc_job->ct.wka_port = &adapter->gs->ds;
+ wka_port = &adapter->gs->ds;
break;
default:
- kfree(ct_fc_job);
return -EINVAL; /* no such service */
}
- ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
- if (ret) {
- kfree(ct_fc_job);
+ ret = zfcp_fc_wka_port_get(wka_port);
+ if (ret)
return ret;
- }
- ct_fc_job->ct.req = job->request_payload.sg_list;
- ct_fc_job->ct.resp = job->reply_payload.sg_list;
- ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
- ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
- ct_fc_job->ct.completion = NULL;
- ct_fc_job->job = job;
+ ret = zfcp_fsf_send_ct(wka_port, ct, NULL);
+ if (ret)
+ zfcp_fc_wka_port_put(wka_port);
- ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
- if (ret) {
- kfree(ct_fc_job);
- zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
- }
return ret;
}
+int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct zfcp_adapter *adapter;
+ struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+
+ shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+ return -EINVAL;
+
+ ct_els->req = job->request_payload.sg_list;
+ ct_els->resp = job->reply_payload.sg_list;
+ ct_els->handler = zfcp_fc_ct_els_job_handler;
+ ct_els->handler_data = job;
+
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ return zfcp_fc_exec_els_job(job, adapter);
+ case FC_BSG_RPT_CT:
+ case FC_BSG_HST_CT:
+ return zfcp_fc_exec_ct_job(job, adapter);
+ default:
+ return -EINVAL;
+ }
+}
+
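
Both ELS and CT jobs reuse the private area the FC transport reserves per job (job->dd_data) as the zfcp_fsf_ct_els, so no extra allocation or free is needed and the handler reaches the job again through handler_data. Presumably the driver sizes that area via dd_bsg_size in its fc_function_template; a hedged sketch of that wiring, which is not part of this hunk:

	/* Assumption: dd_bsg_size is how scsi_transport_fc is told the
	 * per-job private size; treat the exact wiring as illustrative. */
	struct fc_function_template zfcp_transport_functions = {
		/* ... */
		.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
	};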
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
- struct zfcp_wka_ports *wka_ports;
+ struct zfcp_fc_wka_ports *wka_ports;
- wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
+ wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
if (!wka_ports)
return -ENOMEM;
@@ -880,7 +780,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
- zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
return 0;
}
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 00000000000..cb2a3669a38
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,260 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_FC_H
+#define ZFCP_FC_H
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "zfcp_fsf.h"
+
+#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
+ / sizeof(struct fc_gpn_ft_resp))
+#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
+
+#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
+ - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
+ (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+
+/**
+ * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN request
+ */
+struct zfcp_fc_gid_pn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_gid_pn gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN response
+ */
+struct zfcp_fc_gid_pn_resp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gid_pn_resp gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
+ * @ct: data passed to zfcp_fsf for issuing fsf request
+ * @sg_req: scatterlist entry for request data
+ * @sg_resp: scatterlist entry for response data
+ * @gid_pn_req: GID_PN request data
+ * @gid_pn_resp: GID_PN response data
+ * @port: port for which the GID_PN name server lookup is issued
+ */
+struct zfcp_fc_gid_pn {
+ struct zfcp_fsf_ct_els ct;
+ struct scatterlist sg_req;
+ struct scatterlist sg_resp;
+ struct zfcp_fc_gid_pn_req gid_pn_req;
+ struct zfcp_fc_gid_pn_resp gid_pn_resp;
+ struct zfcp_port *port;
+};
+
+/**
+ * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: GPN_FT request
+ */
+struct zfcp_fc_gpn_ft_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_gid_ft gpn_ft;
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ */
+struct zfcp_fc_gpn_ft_resp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
+ * @ct: data passed to zfcp_fsf for issuing fsf request
+ * @sg_req: scatter list entry for gpn_ft request
+ * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ */
+struct zfcp_fc_gpn_ft {
+ struct zfcp_fsf_ct_els ct;
+ struct scatterlist sg_req;
+ struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
+};
+
+/**
+ * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
+ * @els: data required for issuing els fsf command
+ * @req: scatterlist entry for ELS ADISC request
+ * @resp: scatterlist entry for ELS ADISC response
+ * @adisc_req: ELS ADISC request data
+ * @adisc_resp: ELS ADISC response data
+ */
+struct zfcp_fc_els_adisc {
+ struct zfcp_fsf_ct_els els;
+ struct scatterlist req;
+ struct scatterlist resp;
+ struct fc_els_adisc adisc_req;
+ struct fc_els_adisc adisc_resp;
+};
+
+/**
+ * enum zfcp_fc_wka_status - FC WKA port status in zfcp
+ * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
+ * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
+ * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
+ * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
+ */
+enum zfcp_fc_wka_status {
+ ZFCP_FC_WKA_PORT_OFFLINE,
+ ZFCP_FC_WKA_PORT_CLOSING,
+ ZFCP_FC_WKA_PORT_OPENING,
+ ZFCP_FC_WKA_PORT_ONLINE,
+};
+
+/**
+ * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
+ * @adapter: Pointer to adapter structure this WKA port belongs to
+ * @completion_wq: Wait for completion of open/close command
+ * @status: Current status of WKA port
+ * @refcount: Reference count to keep port open as long as it is in use
+ * @d_id: FC destination id or well-known-address
+ * @handle: FSF handle for the open WKA port
+ * @mutex: Mutex used during opening/closing state changes
+ * @work: For delaying the closing of the WKA port
+ */
+struct zfcp_fc_wka_port {
+ struct zfcp_adapter *adapter;
+ wait_queue_head_t completion_wq;
+ enum zfcp_fc_wka_status status;
+ atomic_t refcount;
+ u32 d_id;
+ u32 handle;
+ struct mutex mutex;
+ struct delayed_work work;
+};
+
+/**
+ * struct zfcp_fc_wka_ports - Data structures for FC generic services
+ * @ms: FC Management service
+ * @ts: FC time service
+ * @ds: FC directory service
+ * @as: FC alias service
+ */
+struct zfcp_fc_wka_ports {
+ struct zfcp_fc_wka_port ms;
+ struct zfcp_fc_wka_port ts;
+ struct zfcp_fc_wka_port ds;
+ struct zfcp_fc_wka_port as;
+};
+
+/**
+ * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
+ * @fcp: fcp_cmnd to setup
+ * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ */
+static inline
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+{
+ char tag[2];
+
+ int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+
+ if (scsi_populate_tag_msg(scsi, tag)) {
+ switch (tag[0]) {
+ case MSG_ORDERED_TAG:
+ fcp->fc_pri_ta |= FCP_PTA_ORDERED;
+ break;
+ case MSG_SIMPLE_TAG:
+ fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
+ break;
+ }
+ } else
+ fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+
+ if (scsi->sc_data_direction == DMA_FROM_DEVICE)
+ fcp->fc_flags |= FCP_CFL_RDDATA;
+ if (scsi->sc_data_direction == DMA_TO_DEVICE)
+ fcp->fc_flags |= FCP_CFL_WRDATA;
+
+ memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
+
+ fcp->fc_dl = scsi_bufflen(scsi);
+}
+
+/**
+ * zfcp_fc_fcp_tm - setup FCP command as task management command
+ * @fcp: fcp_cmnd to setup
+ * @dev: scsi_device where to send the task management command
+ * @tm_flags: task management flags to setup tm command
+ */
+static inline
+void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
+{
+ int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
+ fcp->fc_tm_flags |= tm_flags;
+}
+
+/**
+ * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
+ * @fcp_rsp: FCP RSP IU to evaluate
+ * @scsi: SCSI command where to update status and sense buffer
+ */
+static inline
+void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ struct scsi_cmnd *scsi)
+{
+ struct fcp_resp_rsp_info *rsp_info;
+ char *sense;
+ u32 sense_len, resid;
+ u8 rsp_flags;
+
+ set_msg_byte(scsi, COMMAND_COMPLETE);
+ scsi->result |= fcp_rsp->resp.fr_status;
+
+ rsp_flags = fcp_rsp->resp.fr_flags;
+
+ if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
+ rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ set_host_byte(scsi, DID_OK);
+ else {
+ set_host_byte(scsi, DID_ERROR);
+ return;
+ }
+ }
+
+ if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
+ sense = (char *) &fcp_rsp[1];
+ if (rsp_flags & FCP_RSP_LEN_VAL)
+ sense += fcp_rsp->ext.fr_rsp_len;
+ sense_len = min(fcp_rsp->ext.fr_sns_len,
+ (u32) SCSI_SENSE_BUFFERSIZE);
+ memcpy(scsi->sense_buffer, sense, sense_len);
+ }
+
+ if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
+ resid = fcp_rsp->ext.fr_resid;
+ scsi_set_resid(scsi, resid);
+ if (scsi_bufflen(scsi) - resid < scsi->underflow &&
+ !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
+ }
+}
+
+#endif
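
The pointer arithmetic in zfcp_fc_eval_fcp_rsp() relies on the FCP_RSP IU wire layout, so the optional blocks are found by walking past the fixed part. A hedged layout sketch (per FCP-2):

	/*
	 * struct fcp_resp_with_ext	fixed part, flags and lengths
	 * [struct fcp_resp_rsp_info]	if FCP_RSP_LEN_VAL, fr_rsp_len bytes
	 * [sense data]			if FCP_SNS_LEN_VAL, fr_sns_len bytes
	 *
	 * Hence rsp_info and sense both start at &fcp_rsp[1]; the sense
	 * pointer is advanced by fr_rsp_len when response info precedes it.
	 */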
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4e41baa0c14..482dcd97aa5 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -10,7 +10,9 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
#include "zfcp_dbf.h"
static void zfcp_fsf_request_timeout_handler(unsigned long data)
@@ -122,36 +124,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
+ unsigned long flags;
struct fsf_status_read_buffer *sr_buf = req->data;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
- int d_id = sr_buf->d_id & ZFCP_DID_MASK;
- unsigned long flags;
+ int d_id = ntoh24(sr_buf->d_id);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
if (port->d_id == d_id) {
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
- return;
+ break;
}
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
struct fsf_link_down_info *link_down)
{
struct zfcp_adapter *adapter = req->adapter;
- unsigned long flags;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return;
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
zfcp_scsi_schedule_rports_block(adapter);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (!link_down)
goto out;
@@ -291,7 +289,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
req);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
- schedule_work(&adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->scan_work);
break;
case FSF_STATUS_READ_CFDC_UPDATED:
zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
@@ -317,7 +315,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
return;
case FSF_SQ_COMMAND_ABORTED:
- req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
break;
case FSF_SQ_NO_RECOM:
dev_err(&req->adapter->ccw_device->dev,
@@ -358,8 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_response(req);
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
return;
}
@@ -377,7 +373,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
case FSF_PROT_ERROR_STATE:
case FSF_PROT_SEQ_NUMB_ERROR:
zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
- req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_UNSUPP_QTCB_TYPE:
dev_err(&adapter->ccw_device->dev,
@@ -480,20 +476,27 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
- struct fsf_qtcb_bottom_config *bottom;
+ struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
+ struct fc_els_flogi *nsp, *plogi;
- bottom = &req->qtcb->bottom.config;
+ /* adjust pointers for missing command code */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
- fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
- fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+ fc_host_port_name(shost) = nsp->fl_wwpn;
+ fc_host_node_name(shost) = nsp->fl_wwnn;
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) = bottom->fc_link_speed;
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+ fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
+ fc_host_active_fc4s(shost)[2] = 1; /* FCP */
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
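
The sizeof(u32) adjustment exists because the QTCB stores the service parameters without the leading ELS command-code word that struct fc_els_flogi begins with; backing the pointer up realigns the remaining fields. A standalone illustration with a simplified, packed stand-in struct:

	#include <stdint.h>

	struct flogi_like {		/* simplified stand-in for fc_els_flogi */
		uint32_t cmd;		/* command word, absent from the QTCB copy */
		uint32_t csp[4];	/* common service parameters */
		uint64_t wwpn;
	} __attribute__((packed));

	/* payload points at a copy that starts with csp; step back by the
	 * missing word so member offsets line up again. Never read ->cmd
	 * through the result. */
	static inline struct flogi_like *realign(void *payload)
	{
		return (struct flogi_like *)((uint8_t *)payload - sizeof(uint32_t));
	}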
@@ -503,9 +506,9 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
- adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
- adapter->peer_wwpn = bottom->plogi_payload.wwpn;
- adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+ adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+ adapter->peer_wwpn = plogi->fl_wwpn;
+ adapter->peer_wwnn = plogi->fl_wwnn;
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
@@ -881,13 +884,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_BOXED:
zfcp_erp_port_boxed(unit->port, "fsafch3", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
zfcp_erp_unit_boxed(unit, "fsafch4", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (fsq->word[0]) {
@@ -958,10 +959,10 @@ out:
static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
- struct zfcp_send_ct *send_ct = req->data;
+ struct zfcp_fsf_ct_els *ct = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
- send_ct->status = -EINVAL;
+ ct->status = -EINVAL;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto skip_fsfstatus;
@@ -969,7 +970,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_ct_response(req);
- send_ct->status = 0;
+ ct->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -985,8 +986,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
case FSF_ACCESS_DENIED:
break;
case FSF_PORT_BOXED:
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
@@ -1001,8 +1001,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
}
skip_fsfstatus:
- if (send_ct->handler)
- send_ct->handler(send_ct->handler_data);
+ if (ct->handler)
+ ct->handler(ct->handler_data);
}
static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
@@ -1071,15 +1071,17 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
int max_sbals)
{
int ret;
+ unsigned int fcp_chan_timeout;
ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
if (ret)
return ret;
/* common settings for ct/gs and els requests */
+ fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000;
req->qtcb->bottom.support.service_class = FSF_CLASS_3;
- req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
- zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);
+ req->qtcb->bottom.support.timeout = fcp_chan_timeout;
+ zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ);
return 0;
}
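
With libfc's FC_DEF_R_A_TOV defined in milliseconds (10 * 1000), the arithmetic gives the channel twice the resource-allocation timeout and the driver-side timer a safety margin on top:

	/* fcp_chan_timeout = 2 * 10000 / 1000 = 20	seconds, FSF timeout
	 * driver timer     = (20 + 10) * HZ		fires after 30 seconds
	 */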
@@ -1089,9 +1091,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
- * @ct: pointer to struct zfcp_send_ct with data for request
+ * @wka_port: pointer to zfcp WKA port where to send the CT request
+ * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
*/
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+ struct zfcp_fsf_ct_els *ct, mempool_t *pool)
{
- struct zfcp_wka_port *wka_port = ct->wka_port;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int ret = -EIO;
@@ -1117,7 +1119,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
req->qtcb->header.port_handle = wka_port->handle;
req->data = ct;
- zfcp_dbf_san_ct_request(req);
+ zfcp_dbf_san_ct_request(req, wka_port->d_id);
ret = zfcp_fsf_req_send(req);
if (ret)
@@ -1134,7 +1136,7 @@ out:
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_send_els *send_els = req->data;
+ struct zfcp_fsf_ct_els *send_els = req->data;
struct zfcp_port *port = send_els->port;
struct fsf_qtcb_header *header = &req->qtcb->header;
@@ -1154,9 +1156,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]){
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- if (port && (send_els->ls_code != ZFCP_LS_ADISC))
- zfcp_fc_test_link(port);
- /*fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_RETRY_IF_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1188,10 +1187,11 @@ skip_fsfstatus:
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
- * @els: pointer to struct zfcp_send_els with data for the command
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID of the ELS destination
+ * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
*/
-int zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+ struct zfcp_fsf_ct_els *els)
{
struct zfcp_fsf_req *req;
- struct zfcp_qdio *qdio = els->adapter->qdio;
+ struct zfcp_qdio *qdio = adapter->qdio;
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
@@ -1211,7 +1211,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
if (ret)
goto failed_send;
- req->qtcb->bottom.support.d_id = els->d_id;
+ hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
req->data = els;
@@ -1422,7 +1422,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
- struct fsf_plogi *plogi;
+ struct fc_els_flogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto out;
@@ -1472,23 +1472,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* another GID_PN straight after a port has been opened.
* Alternatively, an ADISC/PDISC ELS should suffice as well.
*/
- plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+ plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
if (req->qtcb->bottom.support.els1_length >=
- FSF_PLOGI_MIN_LEN) {
- if (plogi->serv_param.wwpn != port->wwpn) {
- port->d_id = 0;
- dev_warn(&port->adapter->ccw_device->dev,
- "A port opened with WWPN 0x%016Lx "
- "returned data that identifies it as "
- "WWPN 0x%016Lx\n",
- (unsigned long long) port->wwpn,
- (unsigned long long)
- plogi->serv_param.wwpn);
- } else {
- port->wwnn = plogi->serv_param.wwnn;
+ FSF_PLOGI_MIN_LEN)
zfcp_fc_plogi_evaluate(port, plogi);
- }
- }
break;
case FSF_UNKNOWN_OP_SUBTYPE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1496,7 +1483,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
}
out:
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
/**
@@ -1530,18 +1517,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_open_port_handler;
- req->qtcb->bottom.support.d_id = port->d_id;
+ hton24(req->qtcb->bottom.support.d_id, port->d_id);
req->data = port;
req->erp_action = erp_action;
erp_action->fsf_req = req;
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1618,11 +1605,11 @@ out:
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_wka_port *wka_port = req->data;
+ struct zfcp_fc_wka_port *wka_port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
@@ -1635,13 +1622,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* fall through */
case FSF_ACCESS_DENIED:
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
/* fall through */
case FSF_PORT_ALREADY_OPEN:
- wka_port->status = ZFCP_WKA_PORT_ONLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
wake_up(&wka_port->completion_wq);
@@ -1649,10 +1636,10 @@ out:
/**
* zfcp_fsf_open_wka_port - create and send open wka-port request
- * @wka_port: pointer to struct zfcp_wka_port
+ * @wka_port: pointer to struct zfcp_fc_wka_port
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1677,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_open_wka_port_handler;
- req->qtcb->bottom.support.d_id = wka_port->d_id;
+ hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1691,23 +1678,23 @@ out:
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_wka_port *wka_port = req->data;
+ struct zfcp_fc_wka_port *wka_port = req->data;
if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
}
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
/**
* zfcp_fsf_close_wka_port - create and send close wka port request
- * @erp_action: pointer to struct zfcp_erp_action
+ * @wka_port: WKA port to close
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1765,13 +1752,13 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- list_for_each_entry(unit, &port->unit_list_head, list)
+ read_lock(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list)
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
&unit->status);
+ read_unlock(&port->unit_list_lock);
zfcp_erp_port_boxed(port, "fscpph2", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
-
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
@@ -1787,9 +1774,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- list_for_each_entry(unit, &port->unit_list_head, list)
+ read_lock(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list)
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
&unit->status);
+ read_unlock(&port->unit_list_lock);
break;
}
}
@@ -1873,8 +1862,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_BOXED:
zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_SHARING_VIOLATION:
if (header->fsf_status_qual.word[0])
@@ -2036,8 +2024,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_BOXED:
zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (req->qtcb->header.fsf_status_qual.word[0]) {
@@ -2109,72 +2096,57 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
lat_rec->max = max(lat_rec->max, lat);
}
-static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
- struct fsf_qual_latency_info *lat_inf;
- struct latency_cont *lat;
+ struct fsf_qual_latency_info *lat_in;
+ struct latency_cont *lat = NULL;
struct zfcp_unit *unit = req->unit;
+ struct zfcp_blk_drv_data blktrc;
+ int ticks = req->adapter->timer_ticks;
- lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
-
- switch (req->qtcb->bottom.io.data_direction) {
- case FSF_DATADIR_READ:
- lat = &unit->latencies.read;
- break;
- case FSF_DATADIR_WRITE:
- lat = &unit->latencies.write;
- break;
- case FSF_DATADIR_CMND:
- lat = &unit->latencies.cmd;
- break;
- default:
- return;
- }
+ lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
- spin_lock(&unit->latencies.lock);
- zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
- zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
- lat->counter++;
- spin_unlock(&unit->latencies.lock);
-}
-
-#ifdef CONFIG_BLK_DEV_IO_TRACE
-static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
- struct fsf_qual_latency_info *lat_inf;
- struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
- struct request *req = scsi_cmnd->request;
- struct zfcp_blk_drv_data trace;
- int ticks = fsf_req->adapter->timer_ticks;
+ blktrc.flags = 0;
+ blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+ blktrc.inb_usage = req->queue_req.qdio_inb_usage;
+ blktrc.outb_usage = req->queue_req.qdio_outb_usage;
+
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+ blktrc.flags |= ZFCP_BLK_LAT_VALID;
+ blktrc.channel_lat = lat_in->channel_lat * ticks;
+ blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+ switch (req->qtcb->bottom.io.data_direction) {
+ case FSF_DATADIR_READ:
+ lat = &unit->latencies.read;
+ break;
+ case FSF_DATADIR_WRITE:
+ lat = &unit->latencies.write;
+ break;
+ case FSF_DATADIR_CMND:
+ lat = &unit->latencies.cmd;
+ break;
+ }
- trace.flags = 0;
- trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
- if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
- trace.flags |= ZFCP_BLK_LAT_VALID;
- lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
- trace.channel_lat = lat_inf->channel_lat * ticks;
- trace.fabric_lat = lat_inf->fabric_lat * ticks;
+ if (lat) {
+ spin_lock(&unit->latencies.lock);
+ zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+ zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+ lat->counter++;
+ spin_unlock(&unit->latencies.lock);
+ }
}
- if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
- trace.flags |= ZFCP_BLK_REQ_ERROR;
- trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
- trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
- blk_add_driver_data(req->q, req, &trace, sizeof(trace));
-}
-#else
-static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
+ blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+ sizeof(blktrc));
}
-#endif
static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
{
struct scsi_cmnd *scpnt;
- struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
- &(req->qtcb->bottom.io.fcp_rsp);
- u32 sns_len;
- char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+ struct fcp_resp_with_ext *fcp_rsp;
unsigned long flags;
read_lock_irqsave(&req->adapter->abort_lock, flags);
@@ -2185,50 +2157,16 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
return;
}
- if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
- set_host_byte(scpnt, DID_SOFT_ERROR);
- goto skip_fsfstatus;
- }
-
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
- set_host_byte(scpnt, DID_ERROR);
+ set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
goto skip_fsfstatus;
}
- set_msg_byte(scpnt, COMMAND_COMPLETE);
-
- scpnt->result |= fcp_rsp_iu->scsi_status;
+ fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
- if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
- zfcp_fsf_req_latency(req);
-
- zfcp_fsf_trace_latency(req);
-
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
- if (fcp_rsp_info[3] == RSP_CODE_GOOD)
- set_host_byte(scpnt, DID_OK);
- else {
- set_host_byte(scpnt, DID_ERROR);
- goto skip_fsfstatus;
- }
- }
-
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
- sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
- fcp_rsp_iu->fcp_rsp_len;
- sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
- sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-
- memcpy(scpnt->sense_buffer,
- zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
- }
+ zfcp_fsf_req_trace(req, scpnt);
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
- scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
- if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
- scpnt->underflow)
- set_host_byte(scpnt, DID_ERROR);
- }
skip_fsfstatus:
if (scpnt->result != 0)
zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
@@ -2250,11 +2188,13 @@ skip_fsfstatus:
static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
{
- struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
- &(req->qtcb->bottom.io.fcp_rsp);
- char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+ struct fcp_resp_with_ext *fcp_rsp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
- if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
+ if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
(req->status & ZFCP_STATUS_FSFREQ_ERROR))
req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}
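
The task-management handler above reaches the FCP response-info block by
indexing one element past the fixed fcp_resp_with_ext part (&fcp_rsp[1]);
the rsp_code byte found there reports FCP_TMF_CMPL on success. A standalone
sketch of that trailing-struct addressing, using simplified stand-in structs
rather than the <scsi/fc/fc_fcp.h> definitions:

	#include <stdint.h>
	#include <stdio.h>

	struct resp_ext {		/* fixed response part */
		uint8_t resp[24];
		uint32_t ext_len;
	} __attribute__ ((packed));

	struct rsp_info {		/* optional trailing part */
		uint8_t res[3];
		uint8_t rsp_code;	/* 0 == task management complete */
	} __attribute__ ((packed));

	int main(void)
	{
		uint8_t buf[sizeof(struct resp_ext) + sizeof(struct rsp_info)];
		struct resp_ext *rsp = (struct resp_ext *) buf;
		/* the info block starts right behind the fixed part */
		struct rsp_info *info = (struct rsp_info *) &rsp[1];

		info->rsp_code = 0;
		printf("TMF %s\n", info->rsp_code == 0 ? "complete" : "failed");
		return 0;
	}
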
@@ -2314,13 +2254,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_BOXED:
zfcp_erp_port_boxed(unit->port, "fssfch5", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
zfcp_erp_unit_boxed(unit, "fssfch6", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
if (header->fsf_status_qual.word[0] ==
@@ -2335,24 +2273,10 @@ skip_fsfstatus:
else {
zfcp_fsf_send_fcp_command_task_handler(req);
req->unit = NULL;
- zfcp_unit_put(unit);
+ put_device(&unit->sysfs_device);
}
}
-static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
-{
- u32 *fcp_dl_ptr;
-
- /*
- * fcp_dl_addr = start address of fcp_cmnd structure +
- * size of fixed part + size of dynamically sized add_dcp_cdb field
- * SEE FCP-2 documentation
- */
- fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
- (fcp_cmd->add_fcp_cdb_length << 2));
- *fcp_dl_ptr = fcp_dl;
-}
-
/**
* zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
* @unit: unit where command is sent to
@@ -2362,7 +2286,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
struct scsi_cmnd *scsi_cmnd)
{
struct zfcp_fsf_req *req;
- struct fcp_cmnd_iu *fcp_cmnd_iu;
+ struct fcp_cmnd *fcp_cmnd;
unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
int real_bytes, retval = -EIO;
struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2387,23 +2311,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- zfcp_unit_get(unit);
+ get_device(&unit->sysfs_device);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
req->qtcb->header.lun_handle = unit->handle;
req->qtcb->header.port_handle = unit->port->handle;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+ req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
- fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
- fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
/*
* set depending on data direction:
* data direction bits in SBALE (SB Type)
* data direction bits in QTCB
- * data direction bits in FCP_CMND IU
*/
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
@@ -2411,32 +2333,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
break;
case DMA_FROM_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
- fcp_cmnd_iu->rddata = 1;
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
sbtype = SBAL_FLAGS0_TYPE_WRITE;
- fcp_cmnd_iu->wddata = 1;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
}
- if (likely((scsi_cmnd->device->simple_tags) ||
- ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
- (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
- fcp_cmnd_iu->task_attribute = SIMPLE_Q;
- else
- fcp_cmnd_iu->task_attribute = UNTAGGED;
-
- if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
- fcp_cmnd_iu->add_fcp_cdb_length =
- (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
-
- memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
-
- req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
scsi_sglist(scsi_cmnd),
@@ -2454,8 +2361,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto failed_scsi_cmnd;
}
- zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
-
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
@@ -2463,7 +2368,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
failed_scsi_cmnd:
- zfcp_unit_put(unit);
+ put_device(&unit->sysfs_device);
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
@@ -2481,7 +2386,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
- struct fcp_cmnd_iu *fcp_cmnd_iu;
+ struct fcp_cmnd *fcp_cmnd;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
if (unlikely(!(atomic_read(&unit->status) &
@@ -2507,16 +2412,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->header.port_handle = unit->port->handle;
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
- req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- sizeof(u32);
+ req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
- fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
- fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
- fcp_cmnd_iu->task_management_flags = tm_flags;
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
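
Throughout this file the u32 D_ID fields (previously masked with
ZFCP_DID_MASK) give way to a 3-byte wire representation converted with
ntoh24()/hton24(). Those helpers come from the libfc headers
(<scsi/fc_frame.h>); the standalone re-implementation below is only meant to
illustrate the big-endian byte order:

	#include <assert.h>
	#include <stdint.h>

	/* Store a 24-bit FC address in network (big-endian) byte order. */
	static void hton24(uint8_t *p, uint32_t v)
	{
		p[0] = (v >> 16) & 0xff;
		p[1] = (v >> 8) & 0xff;
		p[2] = v & 0xff;
	}

	/* Read a 24-bit FC address back into host order. */
	static uint32_t ntoh24(const uint8_t *p)
	{
		return (p[0] << 16) | (p[1] << 8) | p[2];
	}

	int main(void)
	{
		uint8_t wire[3];

		hton24(wire, 0xfffffc);	/* directory service WKA address */
		assert(ntoh24(wire) == 0xfffffc);
		return 0;
	}
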
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index dcc7c1dbcf5..b3de682b64c 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -11,6 +11,7 @@
#include <linux/pfn.h>
#include <linux/scatterlist.h>
+#include <scsi/libfc.h>
#define FSF_QTCB_CURRENT_VERSION 0x00000001
@@ -228,7 +229,8 @@ struct fsf_status_read_buffer {
u32 length;
u32 res1;
struct fsf_queue_designator queue_designator;
- u32 d_id;
+ u8 res2;
+ u8 d_id[3];
u32 class;
u64 fcp_lun;
u8 res3[24];
@@ -309,22 +311,7 @@ struct fsf_qtcb_header {
u8 res4[16];
} __attribute__ ((packed));
-struct fsf_nport_serv_param {
- u8 common_serv_param[16];
- u64 wwpn;
- u64 wwnn;
- u8 class1_serv_param[16];
- u8 class2_serv_param[16];
- u8 class3_serv_param[16];
- u8 class4_serv_param[16];
- u8 vendor_version_level[16];
-} __attribute__ ((packed));
-
#define FSF_PLOGI_MIN_LEN 112
-struct fsf_plogi {
- u32 code;
- struct fsf_nport_serv_param serv_param;
-} __attribute__ ((packed));
#define FSF_FCP_CMND_SIZE 288
#define FSF_FCP_RSP_SIZE 128
@@ -342,8 +329,8 @@ struct fsf_qtcb_bottom_io {
struct fsf_qtcb_bottom_support {
u32 operation_subtype;
- u8 res1[12];
- u32 d_id;
+ u8 res1[13];
+ u8 d_id[3];
u32 option;
u64 fcp_lun;
u64 res2;
@@ -372,18 +359,18 @@ struct fsf_qtcb_bottom_config {
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
- u32 peer_d_id;
+ u8 res0;
+ u8 peer_d_id[3];
u8 res1[2];
u16 timer_interval;
- u8 res2[8];
- u32 s_id;
- struct fsf_nport_serv_param nport_serv_param;
- u8 reserved_nport_serv_param[16];
+ u8 res2[9];
+ u8 s_id[3];
+ u8 nport_serv_param[128];
u8 res3[8];
u32 adapter_ports;
u32 hardware_version;
u8 serial_number[32];
- struct fsf_nport_serv_param plogi_payload;
+ u8 plogi_payload[112];
struct fsf_statistics_info stat_info;
u8 res4[112];
} __attribute__ ((packed));
@@ -450,4 +437,22 @@ struct zfcp_blk_drv_data {
u64 fabric_lat;
} __attribute__ ((packed));
+/**
+ * struct zfcp_fsf_ct_els - zfcp data for ct or els request
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_fsf_ct_els {
+ struct scatterlist *req;
+ struct scatterlist *resp;
+ void (*handler)(void *);
+ void *handler_data;
+ struct zfcp_port *port;
+ int status;
+};
+
#endif /* FSF_H */
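
The QTCB layout changes above replace each u32 D_ID field with a reserved
byte followed by a u8[3] array; because the structures are packed, the total
size and the offsets of all surrounding fields stay unchanged, so the FCP
channel still sees the same wire format. A compile-time sketch with
simplified, illustrative structs:

	#include <stddef.h>
	#include <stdint.h>

	struct old_layout {
		uint32_t length;
		uint32_t d_id;		/* former representation */
	} __attribute__ ((packed));

	struct new_layout {
		uint32_t length;
		uint8_t res;		/* reserved byte keeps the offset */
		uint8_t d_id[3];	/* 24-bit D_ID, big endian */
	} __attribute__ ((packed));

	_Static_assert(sizeof(struct old_layout) == sizeof(struct new_layout),
		       "wire format size unchanged");
	_Static_assert(offsetof(struct new_layout, d_id) ==
		       offsetof(struct old_layout, d_id) + 1,
		       "3-byte D_ID starts one byte into the former u32");

	int main(void)
	{
		return 0;
	}
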
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0e1a34627a2..771cc536a98 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,29 +9,33 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/types.h>
+#include <scsi/fc/fc_fcp.h>
#include <asm/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
+#include "zfcp_fc.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
-/* Find start of Sense Information in FCP response unit*/
-char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
+ int reason)
{
- char *fcp_sns_info_ptr;
-
- fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
- if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
- fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
-
- return fcp_sns_info_ptr;
-}
-
-static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
-{
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ scsi_track_queue_full(sdev, depth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
return sdev->queue_depth;
}
@@ -39,7 +43,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
unit->device = NULL;
- zfcp_unit_put(unit);
+ put_device(&unit->sysfs_device);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -99,12 +103,26 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
}
status = atomic_read(&unit->status);
- if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
- !(status & ZFCP_STATUS_COMMON_RUNNING))) {
+ if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
+ !(atomic_read(&unit->port->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED)) {
+ /* only the unit's access is denied while the port is still
+ * good; the FC transport does not cover this case, so the
+ * command has to fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
return 0;
}
+ if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+ /* This could be either
+ * open unit pending: this is temporary, will result in
+ * open unit or ERP_FAILED, so retry command
+ * call to rport_delete pending: mimic retry from
+ * fc_remote_port_chkready until rport is BLOCKED
+ */
+ zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
+ return 0;
+ }
+
ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -115,49 +133,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
}
static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
- int channel, unsigned int id,
- unsigned int lun)
+ unsigned int id, u64 lun)
{
+ unsigned long flags;
struct zfcp_port *port;
- struct zfcp_unit *unit;
- int scsi_lun;
+ struct zfcp_unit *unit = NULL;
- list_for_each_entry(port, &adapter->port_list_head, list) {
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
if (!port->rport || (id != port->rport->scsi_target_id))
continue;
- list_for_each_entry(unit, &port->unit_list_head, list) {
- scsi_lun = scsilun_to_int(
- (struct scsi_lun *)&unit->fcp_lun);
- if (lun == scsi_lun)
- return unit;
- }
+ unit = zfcp_get_unit_by_lun(port, lun);
+ if (unit)
+ break;
}
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
- return NULL;
+ return unit;
}
static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
{
struct zfcp_adapter *adapter;
struct zfcp_unit *unit;
- unsigned long flags;
- int retval = -ENXIO;
+ u64 lun;
adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
if (!adapter)
goto out;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
+ int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
+ unit = zfcp_unit_lookup(adapter, sdp->id, lun);
if (unit) {
sdp->hostdata = unit;
unit->device = sdp;
- zfcp_unit_get(unit);
- retval = 0;
+ return 0;
}
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
out:
- return retval;
+ return -ENXIO;
}
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -196,6 +209,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
break;
zfcp_erp_wait(adapter);
+ fc_block_scsi_eh(scpnt);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -235,6 +249,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
break;
zfcp_erp_wait(adapter);
+ fc_block_scsi_eh(scpnt);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -249,9 +264,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
retval = FAILED;
- } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
- zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
- retval = FAILED;
} else
zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
@@ -261,12 +273,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET);
+ return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET);
+ return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
@@ -276,6 +288,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
+ fc_block_scsi_eh(scpnt);
return SUCCESS;
}
@@ -303,7 +316,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_lun = 1;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
- adapter->scsi_host->max_cmd_len = 255;
+ adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -325,12 +338,11 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
if (!shost)
return;
- read_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->rport)
- port->rport = NULL;
+ read_lock_irq(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ port->rport = NULL;
+ read_unlock_irq(&adapter->port_list_lock);
- read_unlock_irq(&zfcp_data.config_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
@@ -348,7 +360,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return NULL;
- adapter->fc_stats = fc_stats; /* freed in adater_dequeue */
+ adapter->fc_stats = fc_stats; /* freed in adapter_release */
}
memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
return adapter->fc_stats;
@@ -464,7 +476,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
adapter->stats_reset = jiffies/HZ;
kfree(adapter->stats_reset_data);
adapter->stats_reset_data = data; /* finally freed in
- adapter_dequeue */
+ adapter_release */
}
}
@@ -495,7 +507,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
* @rport: The FC rport where to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
- * port. Using a reopen for avoids a conflict with a shutdown
+ * port. Using a reopen avoids a conflict with a shutdown
* overwriting a reopen.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
@@ -505,15 +517,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
- write_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
- if (port)
- zfcp_port_get(port);
- write_unlock_irq(&zfcp_data.config_lock);
if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
}
@@ -555,31 +563,34 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->sysfs_device);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
+ unsigned long flags;
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_scsi_schedule_rport_block(port);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_scsi_rport_work(struct work_struct *work)
@@ -597,7 +608,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
}
- zfcp_port_put(port);
+ put_device(&port->sysfs_device);
}
@@ -615,21 +626,7 @@ void zfcp_scsi_scan(struct work_struct *work)
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
- zfcp_unit_put(unit);
-}
-
-static int zfcp_execute_fc_job(struct fc_bsg_job *job)
-{
- switch (job->request->msgcode) {
- case FC_BSG_RPT_ELS:
- case FC_BSG_HST_ELS_NOLOGIN:
- return zfcp_fc_execute_els_fc_job(job);
- case FC_BSG_RPT_CT:
- case FC_BSG_HST_CT:
- return zfcp_fc_execute_ct_fc_job(job);
- default:
- return -EINVAL;
- }
+ put_device(&unit->sysfs_device);
}
struct fc_function_template zfcp_transport_functions = {
@@ -643,6 +640,7 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_port_name = 1,
.show_host_permanent_port_name = 1,
.show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
@@ -652,13 +650,15 @@ struct fc_function_template zfcp_transport_functions = {
.get_host_port_state = zfcp_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
- .bsg_request = zfcp_execute_fc_job,
+ .show_host_active_fc4s = 1,
+ .bsg_request = zfcp_fc_exec_bsg_job,
/* no functions registered for the following dynamic attributes;
they are set directly by the LLDD */
.show_host_port_type = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.disable_target_scan = 1,
+ .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
struct zfcp_data zfcp_data = {
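
The list handling above (zfcp_unit_lookup, the unregister path, blocking the
rports) now walks adapter->port_list under the per-adapter port_list_lock
instead of the former global zfcp_data.config_lock. A user-space sketch of
the same reader/writer discipline, using a pthread rwlock and illustrative
names in place of the kernel primitives:

	#include <pthread.h>
	#include <stdio.h>

	struct port {
		unsigned long long wwpn;
		struct port *next;
	};

	struct adapter {
		pthread_rwlock_t port_list_lock;
		struct port *port_list;
	};

	/* Lookups take the lock shared, like zfcp_get_port_by_wwpn. */
	static struct port *port_by_wwpn(struct adapter *a,
					 unsigned long long wwpn)
	{
		struct port *p, *found = NULL;

		pthread_rwlock_rdlock(&a->port_list_lock);
		for (p = a->port_list; p; p = p->next)
			if (p->wwpn == wwpn) {
				found = p;
				break;
			}
		pthread_rwlock_unlock(&a->port_list_lock);
		return found;
	}

	/* Removal takes it exclusively, like port_remove_store above. */
	static void port_del(struct adapter *a, struct port *victim)
	{
		struct port **pp;

		pthread_rwlock_wrlock(&a->port_list_lock);
		for (pp = &a->port_list; *pp; pp = &(*pp)->next)
			if (*pp == victim) {
				*pp = victim->next;
				break;
			}
		pthread_rwlock_unlock(&a->port_list_lock);
	}

	int main(void)
	{
		struct port p1 = { 0x500507630300c562ULL, NULL };
		struct adapter a = { PTHREAD_RWLOCK_INITIALIZER, &p1 };

		if (port_by_wwpn(&a, p1.wwpn))
			port_del(&a, &p1);
		printf("ports left: %s\n", a.port_list ? "some" : "none");
		return 0;
	}
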
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index d31000886ca..f539e006683 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008
+ * Copyright IBM Corporation 2008, 2009
*/
#define KMSG_COMPONENT "zfcp"
@@ -19,30 +19,44 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, \
+ sysfs_device); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
- atomic_read(&adapter->status));
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
- (unsigned long long) adapter->peer_wwnn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
- (unsigned long long) adapter->peer_wwpn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
- adapter->peer_d_id);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
- adapter->hydra_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
- adapter->fsf_lic_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
- adapter->hardware_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
- (atomic_read(&adapter->status) &
- ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ struct ccw_device *cdev = to_ccwdev(dev); \
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
+ int i; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ i = sprintf(buf, _format, _value); \
+ zfcp_ccw_adapter_put(adapter); \
+ return i; \
+} \
+static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
+ zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
+ (unsigned long long) adapter->peer_wwnn);
+ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
+ (unsigned long long) adapter->peer_wwpn);
+ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
+ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
@@ -73,7 +87,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, \
+ sysfs_device); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
@@ -84,15 +99,13 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, \
+ sysfs_device); \
unsigned long val; \
int retval = 0; \
\
- mutex_lock(&zfcp_data.config_mutex); \
- if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
- retval = -EBUSY; \
- goto out; \
- } \
+ if (!(_feat && get_device(&_feat->sysfs_device))) \
+ return -EBUSY; \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
retval = -EINVAL; \
@@ -105,29 +118,82 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
- mutex_unlock(&zfcp_data.config_mutex); \
+ put_device(&_feat->sysfs_device); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
zfcp_sysfs_##_feat##_failed_show, \
zfcp_sysfs_##_feat##_failed_store);
-ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
+static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int i;
+
+ if (!adapter)
+ return -ENODEV;
+
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ i = sprintf(buf, "1\n");
+ else
+ i = sprintf(buf, "0\n");
+
+ zfcp_ccw_adapter_put(adapter);
+ return i;
+}
+
+static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ unsigned long val;
+ int retval = 0;
+
+ if (!adapter)
+ return -ENODEV;
+
+ if (strict_strtoul(buf, 0, &val) || val != 0) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
+ ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "syafai2", NULL);
+ zfcp_erp_wait(adapter);
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
+ zfcp_sysfs_adapter_failed_show,
+ zfcp_sysfs_adapter_failed_store);
+
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(dev);
- int ret;
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
- return -EBUSY;
+ if (!adapter)
+ return -ENODEV;
- ret = zfcp_fc_scan_ports(adapter);
- return ret ? ret : (ssize_t) count;
+ /* sync the user-space invocation of scan_work with the kernel one */
+ queue_work(adapter->work_queue, &adapter->scan_work);
+ flush_work(&adapter->scan_work);
+ zfcp_ccw_adapter_put(adapter);
+
+ return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
@@ -136,44 +202,34 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
struct zfcp_port *port;
u64 wwpn;
- int retval = 0;
- LIST_HEAD(port_remove_lh);
+ int retval = -EINVAL;
- mutex_lock(&zfcp_data.config_mutex);
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
+ if (!adapter)
+ return -ENODEV;
- if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
- retval = -EINVAL;
+ if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
goto out;
- }
- write_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
- if (port && (atomic_read(&port->refcount) == 0)) {
- zfcp_port_get(port);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
- list_move(&port->list, &port_remove_lh);
- } else
- port = NULL;
- write_unlock_irq(&zfcp_data.config_lock);
-
- if (!port) {
- retval = -ENXIO;
+ if (!port)
goto out;
- }
+ else
+ retval = 0;
+
+ write_lock_irq(&adapter->port_list_lock);
+ list_del(&port->list);
+ write_unlock_irq(&adapter->port_list_lock);
+
+ put_device(&port->sysfs_device);
zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
- zfcp_erp_wait(adapter);
- zfcp_port_put(port);
- zfcp_port_dequeue(port);
+ zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
out:
- mutex_unlock(&zfcp_data.config_mutex);
+ zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -202,16 +258,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = dev_get_drvdata(dev);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port,
+ sysfs_device);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
- mutex_lock(&zfcp_data.config_mutex);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
+ if (!(port && get_device(&port->sysfs_device)))
+ return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
@@ -219,15 +273,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
unit = zfcp_unit_enqueue(port, fcp_lun);
if (IS_ERR(unit))
goto out;
-
- retval = 0;
+ else
+ retval = 0;
zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
zfcp_erp_wait(unit->port->adapter);
flush_work(&unit->scsi_work);
- zfcp_unit_put(unit);
out:
- mutex_unlock(&zfcp_data.config_mutex);
+ put_device(&port->sysfs_device);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -236,54 +289,37 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = dev_get_drvdata(dev);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port,
+ sysfs_device);
struct zfcp_unit *unit;
u64 fcp_lun;
- int retval = 0;
- LIST_HEAD(unit_remove_lh);
+ int retval = -EINVAL;
- mutex_lock(&zfcp_data.config_mutex);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
+ if (!(port && get_device(&port->sysfs_device)))
+ return -EBUSY;
- if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
- retval = -EINVAL;
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
- }
- write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
- if (unit) {
- write_unlock_irq(&zfcp_data.config_lock);
- /* wait for possible timeout during SCSI probe */
- flush_work(&unit->scsi_work);
- write_lock_irq(&zfcp_data.config_lock);
-
- if (atomic_read(&unit->refcount) == 0) {
- zfcp_unit_get(unit);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
- &unit->status);
- list_move(&unit->list, &unit_remove_lh);
- } else {
- unit = NULL;
- }
- }
+ if (!unit)
+ goto out;
+ else
+ retval = 0;
- write_unlock_irq(&zfcp_data.config_lock);
+ /* wait for possible timeout during SCSI probe */
+ flush_work(&unit->scsi_work);
- if (!unit) {
- retval = -ENXIO;
- goto out;
- }
+ write_lock_irq(&port->unit_list_lock);
+ list_del(&unit->list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ put_device(&unit->sysfs_device);
zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
- zfcp_erp_wait(unit->port->adapter);
- zfcp_unit_put(unit);
- zfcp_unit_dequeue(unit);
+ zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
out:
- mutex_unlock(&zfcp_data.config_mutex);
+ put_device(&port->sysfs_device);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
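
The port and unit attribute macros above switch from dev_get_drvdata() to
container_of() on the embedded sysfs_device, so a callback can always recover
its enclosing object for as long as the embedded device itself is alive. A
standalone sketch of that pattern (the types here are illustrative, not the
zfcp ones):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device {
		const char *name;
	};

	struct zfcp_port_like {
		unsigned long long wwpn;
		struct device sysfs_device;	/* embedded, not pointed-to */
	};

	/* An attribute show routine only gets the struct device... */
	static void show(struct device *dev)
	{
		/* ...and computes its way back to the enclosing object. */
		struct zfcp_port_like *port =
			container_of(dev, struct zfcp_port_like, sysfs_device);

		printf("0x%016llx\n", port->wwpn);
	}

	int main(void)
	{
		struct zfcp_port_like port = {
			.wwpn = 0x5005076300c213e5ULL,
			.sysfs_device = { .name = "port" },
		};

		show(&port.sysfs_device);
		return 0;
	}
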