Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 53
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 13
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 1
-rw-r--r--  drivers/s390/block/dasd_int.h | 1
-rw-r--r--  drivers/s390/block/dasd_proc.c | 109
-rw-r--r--  drivers/s390/char/tape_block.c | 5
-rw-r--r--  drivers/s390/char/zcore.c | 163
-rw-r--r--  drivers/s390/cio/ccwreq.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 2
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 4
-rw-r--r--  drivers/s390/cio/cio.c | 14
-rw-r--r--  drivers/s390/cio/crw.c | 29
-rw-r--r--  drivers/s390/cio/css.c | 79
-rw-r--r--  drivers/s390/cio/css.h | 5
-rw-r--r--  drivers/s390/cio/device.c | 160
-rw-r--r--  drivers/s390/cio/device.h | 3
-rw-r--r--  drivers/s390/cio/device_fsm.c | 43
-rw-r--r--  drivers/s390/cio/qdio.h | 92
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 23
-rw-r--r--  drivers/s390/cio/qdio_main.c | 28
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 158
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 4
-rw-r--r--  drivers/s390/net/qeth_core.h | 5
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 169
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 44
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 14
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 30
-rw-r--r--  drivers/s390/net/qeth_l3.h | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 176
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 56
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 90
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 34
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 114
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 36
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 163
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 50
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 109
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 38
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 37
46 files changed, 1450 insertions(+), 978 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c6..4951aa82e9f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -20,6 +20,7 @@
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
+#include <linux/mutex.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void)
INIT_WORK(&device->restore_device, do_restore_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
+ mutex_init(&device->state_mutex);
return device;
}
@@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
device->state = DASD_STATE_READY;
return rc;
}
- dasd_destroy_partitions(block);
dasd_flush_request_queue(block);
+ dasd_destroy_partitions(block);
block->blocks = 0;
block->bp_block = 0;
block->s2b_shift = 0;
@@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device)
if (rc)
device->target = device->state;
- if (device->state == device->target) {
+ if (device->state == device->target)
wake_up(&dasd_init_waitq);
- dasd_put_device(device);
- }
/* let user-space know that the device status changed */
kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device)
static void do_kick_device(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
+ mutex_lock(&device->state_mutex);
dasd_change_state(device);
+ mutex_unlock(&device->state_mutex);
dasd_schedule_device_bh(device);
dasd_put_device(device);
}
@@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device)
void dasd_set_target_state(struct dasd_device *device, int target)
{
dasd_get_device(device);
+ mutex_lock(&device->state_mutex);
/* If we are in probeonly mode stop at DASD_STATE_READY. */
if (dasd_probeonly && target > DASD_STATE_READY)
target = DASD_STATE_READY;
if (device->target != target) {
- if (device->state == target) {
+ if (device->state == target)
wake_up(&dasd_init_waitq);
- dasd_put_device(device);
- }
device->target = target;
}
if (device->state != device->target)
dasd_change_state(device);
+ mutex_unlock(&device->state_mutex);
+ dasd_put_device(device);
}
/*
@@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
return;
}
- device = (struct dasd_device *) cqr->startdev;
- if (device == NULL ||
- device != dasd_device_from_cdev_locked(cdev) ||
- strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "unable to get device from cdev");
+ return;
+ }
+
+ if (!cqr->startdev ||
+ device != cqr->startdev ||
+ strncmp(cqr->startdev->discipline->ebcname,
+ (char *) &cqr->magic, 4)) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
"invalid device in request");
+ dasd_put_device(device);
return;
}
@@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
cqr, rc);
} else {
cqr->stopclk = get_clock();
- rc = 1;
}
break;
default: /* already finished or clear pending - do nothing */
@@ -2129,9 +2139,8 @@ static void dasd_setup_queue(struct dasd_block *block)
blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
- blk_queue_max_sectors(block->request_queue, max);
- blk_queue_max_phys_segments(block->request_queue, -1L);
- blk_queue_max_hw_segments(block->request_queue, -1L);
+ blk_queue_max_hw_sectors(block->request_queue, max);
+ blk_queue_max_segments(block->request_queue, -1L);
/* with page sized segments we can translate each segement into
* one idaw/tidaw
*/
@@ -2170,9 +2179,13 @@ static void dasd_flush_request_queue(struct dasd_block *block)
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
struct dasd_block *block = bdev->bd_disk->private_data;
- struct dasd_device *base = block->base;
+ struct dasd_device *base;
int rc;
+ if (!block)
+ return -ENODEV;
+
+ base = block->base;
atomic_inc(&block->open_count);
if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
rc = -ENODEV;
@@ -2285,11 +2298,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
if (ret)
pr_warning("%s: Setting the DASD online failed with rc=%d\n",
dev_name(&cdev->dev), ret);
- else {
- struct dasd_device *device = dasd_device_from_cdev(cdev);
- wait_event(dasd_init_waitq, _wait_for_device(device));
- dasd_put_device(device);
- }
}
/*
@@ -2424,6 +2432,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
} else
pr_debug("dasd_generic device %s found\n",
dev_name(&cdev->dev));
+
+ wait_event(dasd_init_waitq, _wait_for_device(device));
+
dasd_put_device(device);
return rc;
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 4cac5b54f26..d49766f3b94 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
ssize_t len;
device = dasd_device_from_cdev(to_ccwdev(dev));
- if (!IS_ERR(device) && device->discipline) {
+ if (IS_ERR(device))
+ goto out;
+ else if (!device->discipline) {
+ dasd_put_device(device);
+ goto out;
+ } else {
len = snprintf(buf, PAGE_SIZE, "%s\n",
device->discipline->name);
dasd_put_device(device);
- } else
- len = snprintf(buf, PAGE_SIZE, "none\n");
+ return len;
+ }
+out:
+ len = snprintf(buf, PAGE_SIZE, "none\n");
return len;
}
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d3198303b93..94f92a1247f 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block)
if (block->gdp) {
del_gendisk(block->gdp);
block->gdp->queue = NULL;
+ block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e4c2143dabf..ed73ce55082 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -368,6 +368,7 @@ struct dasd_device {
/* Device state and target state. */
int state, target;
+ struct mutex state_mutex;
int stopped; /* device (ccw_device_start) was stopped */
/* reference count. */
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 71f95f54866..f13a0bdd148 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = {
.release = seq_release,
};
-static int
-dasd_calc_metrics(char *page, char **start, off_t off,
- int count, int *eof, int len)
-{
- len = (len > off) ? len - off : 0;
- if (len > count)
- len = count;
- if (len < count)
- *eof = 1;
- *start = page + off;
- return len;
-}
-
#ifdef CONFIG_DASD_PROFILE
-static char *
-dasd_statistics_array(char *str, unsigned int *array, int factor)
+static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
{
int i;
for (i = 0; i < 32; i++) {
- str += sprintf(str, "%7d ", array[i] / factor);
+ seq_printf(m, "%7d ", array[i] / factor);
if (i == 15)
- str += sprintf(str, "\n");
+ seq_putc(m, '\n');
}
- str += sprintf(str,"\n");
- return str;
+ seq_putc(m, '\n');
}
#endif /* CONFIG_DASD_PROFILE */
-static int
-dasd_statistics_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int dasd_stats_proc_show(struct seq_file *m, void *v)
{
- unsigned long len;
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t *prof;
- char *str;
int factor;
/* check for active profiling */
if (dasd_profile_level == DASD_PROFILE_OFF) {
- len = sprintf(page, "Statistics are off - they might be "
+ seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
- return dasd_calc_metrics(page, start, off, count, eof, len);
+ return 0;
}
prof = &dasd_global_profile;
@@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off,
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
- str = page;
- str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
- str += sprintf(str, "with %u sectors(512B each)\n",
+ seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
+ seq_printf(m, "with %u sectors(512B each)\n",
prof->dasd_io_sects);
- str += sprintf(str, "Scale Factor is %d\n", factor);
- str += sprintf(str,
+ seq_printf(m, "Scale Factor is %d\n", factor);
+ seq_printf(m,
" __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k "
" _16k _32k _64k 128k\n");
- str += sprintf(str,
+ seq_printf(m,
" _256 _512 __1M __2M __4M __8M "
" _16M _32M _64M 128M 256M 512M "
" __1G __2G __4G " " _>4G\n");
- str += sprintf(str, "Histogram of sizes (512B secs)\n");
- str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
- str += sprintf(str, "Histogram of I/O times (microseconds)\n");
- str = dasd_statistics_array(str, prof->dasd_io_times, factor);
- str += sprintf(str, "Histogram of I/O times per sector\n");
- str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
- str += sprintf(str, "Histogram of I/O time till ssch\n");
- str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
- str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
- str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
- str += sprintf(str, "Histogram of I/O time between ssch "
+ seq_printf(m, "Histogram of sizes (512B secs)\n");
+ dasd_statistics_array(m, prof->dasd_io_secs, factor);
+ seq_printf(m, "Histogram of I/O times (microseconds)\n");
+ dasd_statistics_array(m, prof->dasd_io_times, factor);
+ seq_printf(m, "Histogram of I/O times per sector\n");
+ dasd_statistics_array(m, prof->dasd_io_timps, factor);
+ seq_printf(m, "Histogram of I/O time till ssch\n");
+ dasd_statistics_array(m, prof->dasd_io_time1, factor);
+ seq_printf(m, "Histogram of I/O time between ssch and irq\n");
+ dasd_statistics_array(m, prof->dasd_io_time2, factor);
+ seq_printf(m, "Histogram of I/O time between ssch "
"and irq per sector\n");
- str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
- str += sprintf(str, "Histogram of I/O time between irq and end\n");
- str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
- str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
- str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
- len = str - page;
+ dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
+ seq_printf(m, "Histogram of I/O time between irq and end\n");
+ dasd_statistics_array(m, prof->dasd_io_time3, factor);
+ seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
+ dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
#else
- len = sprintf(page, "Statistics are not activated in this kernel\n");
+ seq_printf(m, "Statistics are not activated in this kernel\n");
#endif
- return dasd_calc_metrics(page, start, off, count, eof, len);
+ return 0;
}
-static int
-dasd_statistics_write(struct file *file, const char __user *user_buf,
- unsigned long user_len, void *data)
+static int dasd_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dasd_stats_proc_show, NULL);
+}
+
+static ssize_t dasd_stats_proc_write(struct file *file,
+ const char __user *user_buf, size_t user_len, loff_t *pos)
{
#ifdef CONFIG_DASD_PROFILE
char *buffer, *str;
@@ -308,6 +291,15 @@ out_error:
#endif /* CONFIG_DASD_PROFILE */
}
+static const struct file_operations dasd_stats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = dasd_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dasd_stats_proc_write,
+};
+
/*
* Create dasd proc-fs entries.
* In case creation failed, cleanup and return -ENOENT.
@@ -324,13 +316,12 @@ dasd_proc_init(void)
&dasd_devices_file_ops);
if (!dasd_devices_entry)
goto out_nodevices;
- dasd_statistics_entry = create_proc_entry("statistics",
- S_IFREG | S_IRUGO | S_IWUSR,
- dasd_proc_root_entry);
+ dasd_statistics_entry = proc_create("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ dasd_proc_root_entry,
+ &dasd_stats_proc_fops);
if (!dasd_statistics_entry)
goto out_nostatistics;
- dasd_statistics_entry->read_proc = dasd_statistics_read;
- dasd_statistics_entry->write_proc = dasd_statistics_write;
return 0;
out_nostatistics:
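
For context on the dasd_proc.c conversion above: create_proc_entry() with read_proc/write_proc callbacks is replaced by proc_create() plus a seq_file-backed file_operations, which takes over buffering, offsets and partial reads (so helpers like dasd_calc_metrics become unnecessary). The following is a minimal sketch of that pattern, not part of the patch; the /proc/demo entry and the demo_* names are purely illustrative.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Show callback: seq_file handles buffering and partial reads for us. */
static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from /proc/demo\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	if (!proc_create("demo", S_IRUGO, NULL, &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

On this kernel generation the open/read/llseek/release callbacks live in struct file_operations, exactly as in the dasd_stats_proc_fops added by the patch; much later kernels moved proc entries to struct proc_ops instead.
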
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 8d3d720737d..097da8ce6be 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -222,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device)
goto cleanup_queue;
blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
- blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
- blk_queue_max_phys_segments(blkdat->request_queue, -1L);
- blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+ blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+ blk_queue_max_segments(blkdat->request_queue, -1L);
blk_queue_max_segment_size(blkdat->request_queue, -1L);
blk_queue_segment_boundary(blkdat->request_queue, -1L);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 82daa3c1dc9..3438658b66b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
+#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
@@ -40,12 +41,12 @@ enum arch_id {
/* dump system info */
struct sys_info {
- enum arch_id arch;
- unsigned long sa_base;
- u32 sa_size;
- int cpu_map[NR_CPUS];
- unsigned long mem_size;
- union save_area lc_mask;
+ enum arch_id arch;
+ unsigned long sa_base;
+ u32 sa_size;
+ int cpu_map[NR_CPUS];
+ unsigned long mem_size;
+ struct save_area lc_mask;
};
struct ipib_info {
@@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
return 0;
}
-#ifdef __s390x__
-/*
- * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
- */
-static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
- int cpu)
-{
- int i;
-
- for (i = 0; i < 16; i++) {
- out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
- out->s390.acc_regs[i] = in->s390x.acc_regs[i];
- out->s390.ctrl_regs[i] =
- in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
- }
- /* locore for 31 bit has only space for fpregs 0,2,4,6 */
- out->s390.fp_regs[0] = in->s390x.fp_regs[0];
- out->s390.fp_regs[1] = in->s390x.fp_regs[2];
- out->s390.fp_regs[2] = in->s390x.fp_regs[4];
- out->s390.fp_regs[3] = in->s390x.fp_regs[6];
- memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
- out->s390.psw[1] |= 0x8; /* set bit 12 */
- memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
- out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
- out->s390.pref_reg = in->s390x.pref_reg;
- out->s390.timer = in->s390x.timer;
- out->s390.clk_cmp = in->s390x.clk_cmp;
-}
-
-static void __init s390x_to_s390_save_areas(void)
-{
- int i = 1;
- static union save_area tmp;
-
- while (zfcpdump_save_areas[i]) {
- s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
- memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
- i++;
- }
-}
-
-#endif /* __s390x__ */
-
static int __init init_cpu_info(enum arch_id arch)
{
- union save_area *sa;
+ struct save_area *sa;
/* get info for boot cpu from lowcore, stored in the HSA */
@@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch)
return -EIO;
}
zfcpdump_save_areas[0] = sa;
-
-#ifdef __s390x__
- /* convert s390x regs to s390, if we are dumping an s390 Linux */
-
- if (arch == ARCH_S390)
- s390x_to_s390_save_areas();
-#endif
-
return 0;
}
static DEFINE_MUTEX(zcore_mutex);
-#define DUMP_VERSION 0x3
+#define DUMP_VERSION 0x5
#define DUMP_MAGIC 0xa8190173618f23fdULL
#define DUMP_ARCH_S390X 2
#define DUMP_ARCH_S390 1
@@ -279,7 +229,14 @@ struct zcore_header {
u32 volnr;
u32 build_arch;
u64 rmem_size;
- char pad2[4016];
+ u8 mvdump;
+ u16 cpu_cnt;
+ u16 real_cpu_cnt;
+ u8 end_pad1[0x200-0x061];
+ u64 mvdump_sign;
+ u64 mvdump_zipl_time;
+ u8 end_pad2[0x800-0x210];
+ u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));
static struct zcore_header zcore_header = {
@@ -289,7 +246,7 @@ static struct zcore_header zcore_header = {
.dump_level = 0,
.page_size = PAGE_SIZE,
.mem_start = 0,
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
.build_arch = DUMP_ARCH_S390X,
#else
.build_arch = DUMP_ARCH_S390,
@@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
unsigned long prefix;
unsigned long sa_off, len, buf_off;
- if (sys_info.arch == ARCH_S390)
- prefix = zfcpdump_save_areas[i]->s390.pref_reg;
- else
- prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
-
+ prefix = zfcpdump_save_areas[i]->pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
@@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = {
.release = zcore_reipl_release,
};
+#ifdef CONFIG_32BIT
-static void __init set_s390_lc_mask(union save_area *map)
+static void __init set_lc_mask(struct save_area *map)
{
- memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
- memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
- memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
- memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
- memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
- memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
- memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
- memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
- memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
+ memset(&map->ext_save, 0xff, sizeof(map->ext_save));
+ memset(&map->timer, 0xff, sizeof(map->timer));
+ memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
+ memset(&map->psw, 0xff, sizeof(map->psw));
+ memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
+ memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
+ memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
+ memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
+ memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
-static void __init set_s390x_lc_mask(union save_area *map)
+#else /* CONFIG_32BIT */
+
+static void __init set_lc_mask(struct save_area *map)
{
- memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
- memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
- memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
- memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
- memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
- memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
- memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
- memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
- memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
- memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
+ memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
+ memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
+ memset(&map->psw, 0xff, sizeof(map->psw));
+ memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
+ memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
+ memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
+ memset(&map->timer, 0xff, sizeof(map->timer));
+ memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
+ memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
+ memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
+#endif /* CONFIG_32BIT */
+
/*
* Initialize dump globals for a given architecture
*/
@@ -599,21 +557,18 @@ static int __init sys_info_init(enum arch_id arch)
switch (arch) {
case ARCH_S390X:
pr_alert("DETECTED 'S390X (64 bit) OS'\n");
- sys_info.sa_base = SAVE_AREA_BASE_S390X;
- sys_info.sa_size = sizeof(struct save_area_s390x);
- set_s390x_lc_mask(&sys_info.lc_mask);
break;
case ARCH_S390:
pr_alert("DETECTED 'S390 (32 bit) OS'\n");
- sys_info.sa_base = SAVE_AREA_BASE_S390;
- sys_info.sa_size = sizeof(struct save_area_s390);
- set_s390_lc_mask(&sys_info.lc_mask);
break;
default:
pr_alert("0x%x is an unknown architecture.\n",arch);
return -EINVAL;
}
+ sys_info.sa_base = SAVE_AREA_BASE;
+ sys_info.sa_size = sizeof(struct save_area);
sys_info.arch = arch;
+ set_lc_mask(&sys_info.lc_mask);
rc = init_cpu_info(arch);
if (rc)
return rc;
@@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem)
static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
- int rc;
+ int rc, i;
unsigned long memory = 0;
+ u32 prefix;
if (arch == ARCH_S390X)
hdr->arch_id = DUMP_ARCH_S390X;
@@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
hdr->num_pages = memory / PAGE_SIZE;
hdr->tod = get_clock();
get_cpu_id(&hdr->cpu_id);
+ for (i = 0; zfcpdump_save_areas[i]; i++) {
+ prefix = zfcpdump_save_areas[i]->pref_reg;
+ hdr->real_cpu_cnt++;
+ if (!prefix)
+ continue;
+ hdr->lc_vec[hdr->cpu_cnt] = prefix;
+ hdr->cpu_cnt++;
+ }
return 0;
}
@@ -741,14 +705,21 @@ static int __init zcore_init(void)
if (rc)
goto fail;
-#ifndef __s390x__
+#ifdef CONFIG_64BIT
+ if (arch == ARCH_S390) {
+ pr_alert("The 64-bit dump tool cannot be used for a "
+ "32-bit system\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+#else /* CONFIG_64BIT */
if (arch == ARCH_S390X) {
pr_alert("The 32-bit dump tool cannot be used for a "
"64-bit system\n");
rc = -EINVAL;
goto fail;
}
-#endif
+#endif /* CONFIG_64BIT */
rc = sys_info_init(arch);
if (rc)
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7a28a3029a3..37df42af05e 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
*/
void ccw_request_handler(struct ccw_device *cdev)
{
+ struct irb *irb = (struct irb *)&S390_lowcore.irb;
struct ccw_request *req = &cdev->private->req;
- struct irb *irb = (struct irb *) __LC_IRB;
enum io_status status;
int rc = -EOPNOTSUPP;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e56764..4038f5b4f14 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
- secm_area->key = PAGE_DEFAULT_KEY;
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c84ac944307..852612f5dba 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
{
struct chsc_private *private = sch->private;
struct chsc_request *request = private->request;
- struct irb *irb = (struct irb *)__LC_IRB;
+ struct irb *irb = (struct irb *)&S390_lowcore.irb;
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
@@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
int ret = -ENODEV;
char dbf[10];
- chsc_area->header.key = PAGE_DEFAULT_KEY;
+ chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
private = sch->private;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a..f736cdcf08a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
/*
* Get interrupt information from lowcore
*/
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- irb = (struct irb *) __LC_IRB;
+ tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
+ irb = (struct irb *)&S390_lowcore.irb;
do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
/*
@@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
* We don't do this for VM because a tpi drops the cpu
* out of the sie which costs more cycles than it saves.
*/
- } while (!MACHINE_IS_VM && tpi (NULL) != 0);
+ } while (MACHINE_IS_LPAR && tpi(NULL) != 0);
irq_exit();
set_irq_regs(old_regs);
}
@@ -682,10 +682,10 @@ static int cio_tpi(void)
struct irb *irb;
int irq_context;
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+ tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
if (tpi(NULL) != 1)
return 0;
- irb = (struct irb *) __LC_IRB;
+ irb = (struct irb *)&S390_lowcore.irb;
/* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) != 0)
/* Not status pending or not operational. */
@@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.schid, (struct irb *)__LC_IRB);
+ tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
if (schid_equal(&ti.schid, &schid))
return 0;
}
@@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
struct subchannel_id schid;
struct schib schib;
- schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+ schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
if (stsch(schid, &schib))
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e7..425f741a280 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -8,15 +8,16 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
+#include <linux/wait.h>
#include <asm/crw.h>
-static struct semaphore crw_semaphore;
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/**
* crw_register_handler() - register a channel report word handler
@@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
static int crw_collect_info(void *unused)
{
struct crw crw[2];
- int ccode;
+ int ccode, signal;
unsigned int chain;
- int ignore;
repeat:
- ignore = down_interruptible(&crw_semaphore);
+ signal = wait_event_interruptible(crw_handler_wait_q,
+ atomic_read(&crw_nr_req) > 0);
+ if (unlikely(signal))
+ atomic_inc(&crw_nr_req);
chain = 0;
while (1) {
crw_handler_t handler;
@@ -122,25 +125,23 @@ repeat:
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
+ if (atomic_dec_and_test(&crw_nr_req))
+ wake_up(&crw_handler_wait_q);
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
- up(&crw_semaphore);
+ atomic_inc(&crw_nr_req);
+ wake_up(&crw_handler_wait_q);
}
-/*
- * Separate initcall needed for semaphore initialization since
- * crw_handle_channel_report might be called before crw_machine_check_init.
- */
-static int __init crw_init_semaphore(void)
+void crw_wait_for_channel_report(void)
{
- init_MUTEX_LOCKED(&crw_semaphore);
- return 0;
+ crw_handle_channel_report();
+ wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
-pure_initcall(crw_init_semaphore);
/*
* Machine checks for the channel subsystem must be enabled
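
The crw.c hunks above replace the kick semaphore with an atomic request counter plus a wait queue, which additionally lets crw_wait_for_channel_report() block until all outstanding channel reports have been processed. A condensed sketch of that producer/consumer pattern follows; the names are hypothetical and it is not the actual driver code.

#include <linux/wait.h>
#include <asm/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(wq);

/* Producer: note one new request and wake the worker
 * (the role crw_handle_channel_report() plays in the patch). */
static void kick_worker(void)
{
	atomic_inc(&pending);
	wake_up(&wq);
}

/* Worker loop: sleep until a request arrives, process it, then signal
 * completion once the counter drops back to zero. */
static int worker(void *unused)
{
	for (;;) {
		if (wait_event_interruptible(wq, atomic_read(&pending) > 0))
			atomic_inc(&pending);	/* balance the decrement below */
		/* ... drain the pending hardware reports here ... */
		if (atomic_dec_and_test(&pending))
			wake_up(&wq);	/* wake waiters in wait_until_drained() */
	}
	return 0;
}

/* Synchronous caller: inject one request and wait until everything,
 * including that request, has been handled (crw_wait_for_channel_report()). */
static void wait_until_drained(void)
{
	kick_worker();
	wait_event(wq, atomic_read(&pending) == 0);
}

The extra increment after an interrupted sleep mirrors the patch: it keeps the counter balanced against the unconditional decrement at the end of each pass through the loop.
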
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa1..2769da54f2b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
+#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
if (!get_device(&sch->dev))
return;
sch->todo = todo;
- if (!queue_work(slow_path_wq, &sch->todo_work)) {
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&sch->dev);
}
@@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
@@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
- flush_workqueue(slow_path_wq);
+ flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
ret = css_bus_init();
if (ret)
return ret;
-
+ cio_work_q = create_singlethread_workqueue("cio");
+ if (!cio_work_q) {
+ ret = -ENOMEM;
+ goto out_bus;
+ }
ret = io_subchannel_init();
if (ret)
- css_bus_cleanup();
+ goto out_wq;
return ret;
+out_wq:
+ destroy_workqueue(cio_work_q);
+out_bus:
+ css_bus_cleanup();
+ return ret;
}
subsys_initcall(channel_subsystem_init);
@@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle)
- cssdrv->settle();
+ return cssdrv->settle();
return 0;
}
+int css_complete_work(void)
+{
+ int ret;
+
+ /* Wait for the evaluation of subchannels to finish. */
+ ret = wait_event_interruptible(css_eval_wq,
+ atomic_read(&css_eval_scheduled) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
/*
* Wait for the initialization of devices to finish, to make sure we are
* done with our setup if the search for the root device starts.
@@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
{
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
- /* Wait for the evaluation of subchannels to finish. */
- wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
- /* Wait for the subchannel type specific initialization to finish */
- return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+ css_complete_work();
+ return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ /* Handle pending CRW's. */
+ crw_wait_for_channel_report();
+ ret = css_complete_work();
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+ .write = cio_settle_write,
+};
+
+static int __init cio_settle_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_settle", S_IWUSR, NULL,
+ &cio_settle_proc_fops);
+ if (!entry)
+ return -ENOMEM;
+ return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
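
The new /proc/cio_settle entry registered above is write-only; any write blocks in cio_settle_write() until pending channel report words are handled and the subchannel evaluation work has drained. A hypothetical user-space caller (error handling kept minimal, not taken from any existing tool) could look like this:

#include <fcntl.h>
#include <unistd.h>

/* Ask the kernel to settle the channel subsystem and wait for completion.
 * The written data is ignored by the handler; only the write() itself matters. */
static int cio_settle(void)
{
	int fd, rc;

	fd = open("/proc/cio_settle", O_WRONLY);
	if (fd < 0)
		return -1;
	rc = write(fd, "1", 1);	/* returns only once evaluation is complete */
	close(fd);
	return rc < 0 ? -1 : 0;
}
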
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde6..7e37886de23 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -95,7 +95,7 @@ struct css_driver {
int (*freeze)(struct subchannel *);
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
- void (*settle)(void);
+ int (*settle)(void);
const char *name;
};
@@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
+int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
struct schib;
int css_sch_is_valid(struct schib *);
-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
void css_wait_for_slow_path(void);
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb..c6abb75c461 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;
@@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
return 0;
}
-static void io_subchannel_settle(void)
+static int io_subchannel_settle(void)
{
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- flush_workqueue(ccw_device_work);
+ int ret;
+
+ ret = wait_event_interruptible(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ return 0;
}
static struct css_driver io_subchannel_driver = {
@@ -188,27 +192,13 @@ int __init io_subchannel_init(void)
atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
- ccw_device_work = create_singlethread_workqueue("cio");
- if (!ccw_device_work)
- return -ENOMEM;
- slow_path_wq = create_singlethread_workqueue("kslowcrw");
- if (!slow_path_wq) {
- ret = -ENOMEM;
- goto out_err;
- }
- if ((ret = bus_register (&ccw_bus_type)))
- goto out_err;
-
+ ret = bus_register(&ccw_bus_type);
+ if (ret)
+ return ret;
ret = css_driver_register(&io_subchannel_driver);
if (ret)
- goto out_err;
+ bus_unregister(&ccw_bus_type);
- return 0;
-out_err:
- if (ccw_device_work)
- destroy_workqueue(ccw_device_work);
- if (slow_path_wq)
- destroy_workqueue(slow_path_wq);
return ret;
}
@@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
/* Not operational. */
if (!cdev)
return IO_SCH_UNREG;
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_ORPH_UNREG;
}
@@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
if (!cdev)
return IO_SCH_ATTACH;
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG_ATTACH;
return IO_SCH_ORPH_ATTACH;
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
- if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_DISC;
}
@@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
rc = 0;
goto out_unlock;
case IO_SCH_VERIFY:
+ if (cdev->private->flags.resuming == 1) {
+ if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
+ ccw_device_set_notoper(cdev);
+ break;
+ }
+ }
/* Trigger path verification. */
io_subchannel_verify(sch);
rc = 0;
@@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
break;
case IO_SCH_UNREG_ATTACH:
/* Unregister ccw device. */
- ccw_device_unregister(cdev);
+ if (!cdev->private->flags.resuming)
+ ccw_device_unregister(cdev);
break;
default:
break;
@@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
switch (action) {
case IO_SCH_ORPH_UNREG:
case IO_SCH_UNREG:
- css_sch_device_unregister(sch);
+ if (!cdev || !cdev->private->flags.resuming)
+ css_sch_device_unregister(sch);
break;
case IO_SCH_ORPH_ATTACH:
case IO_SCH_UNREG_ATTACH:
@@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- if (cio_is_console(sch->schid))
- goto out;
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ goto out_unlock;
+ }
/*
* While we were sleeping, devices may have gone or become
* available again. Kick re-detection.
*/
- spin_lock_irq(sch->lock);
cdev->private->flags.resuming = 1;
+ css_schedule_eval(sch->schid);
+ spin_unlock_irq(sch->lock);
+ css_complete_work();
+
+ /* cdev may have been moved to a different subchannel. */
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+
ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED);
-out:
+ spin_lock_irq(sch->lock);
+
+out_unlock:
cdev->private->flags.resuming = 0;
+ spin_unlock_irq(sch->lock);
}
static int resume_handle_boxed(struct ccw_device *cdev)
{
cdev->private->state = DEV_STATE_BOXED;
- if (ccw_device_notify(cdev, CIO_BOXED))
+ if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
@@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
static int resume_handle_disc(struct ccw_device *cdev)
{
cdev->private->state = DEV_STATE_DISCONNECTED;
- if (ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
@@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
static int ccw_device_pm_restore(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
- int ret = 0, cm_enabled;
+ struct subchannel *sch;
+ int ret = 0;
__ccw_device_pm_restore(cdev);
+ sch = to_subchannel(cdev->dev.parent);
spin_lock_irq(sch->lock);
- if (cio_is_console(sch->schid)) {
- cio_enable_subchannel(sch, (u32)(addr_t)sch);
- spin_unlock_irq(sch->lock);
+ if (cio_is_console(sch->schid))
goto out_restore;
- }
- cdev->private->flags.donotify = 0;
+
/* check recognition results */
switch (cdev->private->state) {
case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ cdev->private->flags.donotify = 0;
break;
case DEV_STATE_BOXED:
ret = resume_handle_boxed(cdev);
- spin_unlock_irq(sch->lock);
if (ret)
- goto out;
+ goto out_unlock;
goto out_restore;
- case DEV_STATE_DISCONNECTED:
- goto out_disc_unlock;
default:
- goto out_unreg_unlock;
- }
- /* check if the device id has changed */
- if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
- "changed from %04x to %04x)\n",
- sch->schid.ssid, sch->schid.sch_no,
- cdev->private->dev_id.devno,
- sch->schib.pmcw.dev);
- goto out_unreg_unlock;
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
}
/* check if the device type has changed */
if (!ccw_device_test_sense_data(cdev)) {
@@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev)
ret = -ENODEV;
goto out_unlock;
}
- if (!cdev->online) {
- ret = 0;
+ if (!cdev->online)
goto out_unlock;
- }
- ret = ccw_device_online(cdev);
- if (ret)
- goto out_disc_unlock;
- cm_enabled = cdev->private->cmb != NULL;
+ if (ccw_device_online(cdev)) {
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
spin_unlock_irq(sch->lock);
-
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- if (cdev->private->state != DEV_STATE_ONLINE) {
- spin_lock_irq(sch->lock);
- goto out_disc_unlock;
+ spin_lock_irq(sch->lock);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ ret = -ENODEV;
+ goto out_unlock;
}
- if (cm_enabled) {
+
+ /* reenable cmf, if needed */
+ if (cdev->private->cmb) {
+ spin_unlock_irq(sch->lock);
ret = ccw_set_cmf(cdev, 1);
+ spin_lock_irq(sch->lock);
if (ret) {
CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
"(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev)
}
out_restore:
+ spin_unlock_irq(sch->lock);
if (cdev->online && cdev->drv && cdev->drv->restore)
ret = cdev->drv->restore(cdev);
-out:
return ret;
-out_disc_unlock:
- ret = resume_handle_disc(cdev);
- spin_unlock_irq(sch->lock);
- if (ret)
- return ret;
- goto out_restore;
-
-out_unreg_unlock:
- ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
- ret = -ENODEV;
out_unlock:
spin_unlock_irq(sch->lock);
return ret;
@@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
/* Get workqueue ref. */
if (!get_device(&cdev->dev))
return;
- if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+ if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&cdev->dev);
}
@@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e4263..379de2d1ec4 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,7 +4,7 @@
#include <asm/ccwdev.h>
#include <asm/atomic.h>
#include <linux/wait.h>
-
+#include <linux/notifier.h>
#include "io_sch.h"
/*
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
-extern struct workqueue_struct *ccw_device_work;
extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ae760658a13..c56ab94612f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
}
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occured
+ * @event: event that occurred
+ *
+ * Returns:
+ * -%EINVAL if the device is offline or has no driver.
+ * -%EOPNOTSUPP if the device's driver has no notifier registered.
+ * %NOTIFY_OK if the driver wants to keep the device.
+ * %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
+ int ret = -EINVAL;
+
if (!cdev->drv)
- return 0;
+ goto out;
if (!cdev->online)
- return 0;
+ goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event);
- return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+ if (!cdev->drv->notify) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (cdev->drv->notify(cdev, event))
+ ret = NOTIFY_OK;
+ else
+ ret = NOTIFY_BAD;
+out:
+ return ret;
}
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
- if (ccw_device_notify(cdev, CIO_OPER)) {
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
return;
@@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
- if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
+ if (cdev->online &&
+ ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
@@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
- if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
@@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev)
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
@@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb;
int is_cmd;
- irb = (struct irb *) __LC_IRB;
+ irb = (struct irb *)&S390_lowcore.irb;
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
@@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
- irb = (struct irb *) __LC_IRB;
+ irb = (struct irb *)&S390_lowcore.irb;
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
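
The device_fsm.c hunk above documents the reworked ccw_device_notify() contract: the driver's notify callback still returns nonzero to keep the device and zero to drop it, and common code maps that to NOTIFY_OK or NOTIFY_BAD (or an errno when no callback is registered). A hypothetical driver-side callback under that convention, not taken from any in-tree driver:

#include <asm/ccwdev.h>

/* Hypothetical ccw driver notify callback: a nonzero return becomes
 * NOTIFY_OK (keep the device), zero becomes NOTIFY_BAD (let common
 * code unregister it). */
static int demo_ccw_notify(struct ccw_device *cdev, int event)
{
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		return 0;	/* give the device up */
	case CIO_OPER:
	case CIO_BOXED:
	default:
		return 1;	/* keep the device and wait for recovery */
	}
}
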
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 44f2f6a97f3..48aa0647432 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,18 +208,27 @@ struct qdio_dev_perf_stat {
unsigned int eqbs_partial;
unsigned int sqbs;
unsigned int sqbs_partial;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+ /*
+ * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+ * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
+ * aka 127 SBALs found.
+ */
+ unsigned int nr_sbals[8];
+ unsigned int nr_sbal_error;
+ unsigned int nr_sbal_nop;
+ unsigned int nr_sbal_total;
};
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
-
/* first ACK'ed buffer */
int ack_start;
-
/* how much sbals are acknowledged with qebsm */
int ack_count;
-
/* last time of noticing incoming data */
u64 timestamp;
};
@@ -227,40 +236,27 @@ struct qdio_input_q {
struct qdio_output_q {
/* PCIs are enabled for the queue */
int pci_out_enabled;
-
/* IQDIO: output multiple buffers (enhanced SIGA) */
int use_enh_siga;
-
/* timer to check for more outbound work */
struct timer_list timer;
};
+/*
+ * Note on cache alignment: grouped slsb and write mostly data at the beginning
+ * sbal[] is read-only and starts on a new cacheline followed by read mostly.
+ */
struct qdio_q {
struct slsb slsb;
+
union {
struct qdio_input_q in;
struct qdio_output_q out;
} u;
- /* queue number */
- int nr;
-
- /* bitmask of queue number */
- int mask;
-
- /* input or output queue */
- int is_input_q;
-
- /* list of thinint input queues */
- struct list_head entry;
-
- /* upper-layer program handler */
- qdio_handler_t (*handler);
-
/*
* inbound: next buffer the program should check for
- * outbound: next buffer to check for having been processed
- * by the card
+ * outbound: next buffer to check if adapter processed it
*/
int first_to_check;
@@ -273,16 +269,32 @@ struct qdio_q {
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
- struct qdio_irq *irq_ptr;
- struct dentry *debugfs_q;
- struct tasklet_struct tasklet;
-
/* error condition during a data transfer */
unsigned int qdio_error;
- struct sl *sl;
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ struct tasklet_struct tasklet;
+ struct qdio_queue_perf_stat q_stats;
+
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+ /* queue number */
+ int nr;
+
+ /* bitmask of queue number */
+ int mask;
+
+ /* input or output queue */
+ int is_input_q;
+
+ /* list of thinint input queues */
+ struct list_head entry;
+ /* upper-layer program handler */
+ qdio_handler_t (*handler);
+
+ struct dentry *debugfs_q;
+ struct qdio_irq *irq_ptr;
+ struct sl *sl;
/*
* Warning: Leave this member at the end so it won't be cleared in
* qdio_fill_qs. A page is allocated under this pointer and used for
@@ -317,12 +329,8 @@ struct qdio_irq {
struct qdio_ssqd_desc ssqd_desc;
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
- struct qdio_dev_perf_stat perf_stat;
int perf_stat_enabled;
- /*
- * Warning: Leave these members together at the end so they won't be
- * cleared in qdio_setup_irq.
- */
+
struct qdr *qdr;
unsigned long chsc_page;
@@ -331,6 +339,7 @@ struct qdio_irq {
debug_info_t *debug_area;
struct mutex setup_mutex;
+ struct qdio_dev_perf_stat perf_stat;
};
/* helper functions */
@@ -341,9 +350,20 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
-#define qperf(qdev,attr) qdev->perf_stat.attr
-#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
- q->irq_ptr->perf_stat.attr++
+#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
+
+#define qperf_inc(__q, __attr) \
+({ \
+ struct qdio_irq *qdev = (__q)->irq_ptr; \
+ if (qdev->perf_stat_enabled) \
+ (qdev->perf_stat.__attr)++; \
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+ q->q_stats.nr_sbal_error += count;
+ q->q_stats.nr_sbal_total += count;
+}
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f49761ff9a0..c94eb2a0fa2 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
- seq_printf(m, "slsb buffer states:\n");
+ seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v)
}
seq_printf(m, "\n");
seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
+
+ seq_printf(m, "\nSBAL statistics:");
+ if (!q->irq_ptr->perf_stat_enabled) {
+ seq_printf(m, " disabled\n");
+ return 0;
+ }
+
+ seq_printf(m, "\n1 2.. 4.. 8.. "
+ "16.. 32.. 64.. 127\n");
+ for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+ seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+ seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
+ q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+ q->q_stats.nr_sbal_total);
return 0;
}
@@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
{
struct seq_file *seq = file->private_data;
struct qdio_irq *irq_ptr = seq->private;
+ struct qdio_q *q;
unsigned long val;
char buf[8];
- int ret;
+ int ret, i;
if (!irq_ptr)
return 0;
@@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
case 0:
irq_ptr->perf_stat_enabled = 0;
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ for_each_input_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ for_each_output_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
break;
case 1:
irq_ptr->perf_stat_enabled = 1;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 62b654af923..232ef047ba3 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
+static inline void account_sbals(struct qdio_q *q, int count)
+{
+ int pos = 0;
+
+ q->q_stats.nr_sbal_total += count;
+ if (count == QDIO_MAX_BUFFERS_MASK) {
+ q->q_stats.nr_sbals[7]++;
+ return;
+ }
+ while (count >>= 1)
+ pos++;
+ q->q_stats.nr_sbals[pos]++;
+}
+
static void announce_buffer_error(struct qdio_q *q, int count)
{
q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
q->first_to_check = add_buf(q->first_to_check, count);
if (atomic_sub(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
@@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
+ if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_usecs();
return 1;
} else
@@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
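
To make the bucket mapping of account_sbals() above concrete, a few worked values, assuming QDIO_MAX_BUFFERS_MASK is 127 (i.e. QDIO_MAX_BUFFERS_PER_Q - 1):

/*
 * count =   1  ->  pos 0  ->  nr_sbals[0]  (bucket "1")
 * count =   5  ->  pos 2  ->  nr_sbals[2]  (bucket "4..")
 * count = 126  ->  pos 6  ->  nr_sbals[6]  (bucket "64..")
 * count = 127  ->  special-cased as queue full  ->  nr_sbals[7]
 */
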
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 8c2dea5fa2b..7f4a7546514 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
- irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
- irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc;
- memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+ irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+ irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
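
The akey/bkey/ckey/dkey and qkey fields are 4-bit access-key fields, while PAGE_DEFAULT_KEY is a full storage-key byte that carries the access key in bits 4-7 (assuming the usual s390 definition PAGE_DEFAULT_KEY == PAGE_DEFAULT_ACC << 4); hence the shift by 4 here and in qdio_thinint.c below. A hedged sketch of the relationship:

/* Sketch only; the PAGE_DEFAULT_* definitions shown here are assumptions
 * based on the common arch/s390/include/asm/page.h layout. */
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)	/* access key in bits 4-7 */

/* The QDIO descriptor key fields are 4 bits wide, so they take the
 * access key itself, not the whole storage-key byte. */
static inline unsigned char qdio_default_access_key(void)
{
	return PAGE_DEFAULT_KEY >> 4;
}
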
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 091d904d318..9942c1031b2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
.code = 0x0021,
};
scssc_area->operation_code = 0;
- scssc_area->ks = PAGE_DEFAULT_KEY;
- scssc_area->kc = PAGE_DEFAULT_KEY;
+ scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
scssc_area->isc = QDIO_AIRQ_ISC;
scssc_area->schid = irq_ptr->schid;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index c68be24e27d..ba50fe02e57 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -33,6 +33,7 @@
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
#include <asm/atomic.h>
@@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = {
*/
static struct proc_dir_entry *zcrypt_entry;
-static int sprintcl(unsigned char *outaddr, unsigned char *addr,
- unsigned int len)
+static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
- int hl, i;
+ int i;
- hl = 0;
for (i = 0; i < len; i++)
- hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
- hl += sprintf(outaddr+hl, " ");
- return hl;
+ seq_printf(m, "%01x", (unsigned int) addr[i]);
+ seq_putc(m, ' ');
}
-static int sprintrw(unsigned char *outaddr, unsigned char *addr,
- unsigned int len)
+static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
- int hl, inl, c, cx;
+ int inl, c, cx;
- hl = sprintf(outaddr, " ");
+ seq_printf(m, " ");
inl = 0;
for (c = 0; c < (len / 16); c++) {
- hl += sprintcl(outaddr+hl, addr+inl, 16);
+ sprintcl(m, addr+inl, 16);
inl += 16;
}
cx = len%16;
if (cx) {
- hl += sprintcl(outaddr+hl, addr+inl, cx);
+ sprintcl(m, addr+inl, cx);
inl += cx;
}
- hl += sprintf(outaddr+hl, "\n");
- return hl;
+ seq_putc(m, '\n');
}
-static int sprinthx(unsigned char *title, unsigned char *outaddr,
- unsigned char *addr, unsigned int len)
+static void sprinthx(unsigned char *title, struct seq_file *m,
+ unsigned char *addr, unsigned int len)
{
- int hl, inl, r, rx;
+ int inl, r, rx;
- hl = sprintf(outaddr, "\n%s\n", title);
+ seq_printf(m, "\n%s\n", title);
inl = 0;
for (r = 0; r < (len / 64); r++) {
- hl += sprintrw(outaddr+hl, addr+inl, 64);
+ sprintrw(m, addr+inl, 64);
inl += 64;
}
rx = len % 64;
if (rx) {
- hl += sprintrw(outaddr+hl, addr+inl, rx);
+ sprintrw(m, addr+inl, rx);
inl += rx;
}
- hl += sprintf(outaddr+hl, "\n");
- return hl;
+ seq_putc(m, '\n');
}
-static int sprinthx4(unsigned char *title, unsigned char *outaddr,
- unsigned int *array, unsigned int len)
+static void sprinthx4(unsigned char *title, struct seq_file *m,
+ unsigned int *array, unsigned int len)
{
- int hl, r;
+ int r;
- hl = sprintf(outaddr, "\n%s\n", title);
+ seq_printf(m, "\n%s\n", title);
for (r = 0; r < len; r++) {
if ((r % 8) == 0)
- hl += sprintf(outaddr+hl, " ");
- hl += sprintf(outaddr+hl, "%08X ", array[r]);
+ seq_printf(m, " ");
+ seq_printf(m, "%08X ", array[r]);
if ((r % 8) == 7)
- hl += sprintf(outaddr+hl, "\n");
+ seq_putc(m, '\n');
}
- hl += sprintf(outaddr+hl, "\n");
- return hl;
+ seq_putc(m, '\n');
}
-static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
- int count, int *eof, void *data)
+static int zcrypt_proc_show(struct seq_file *m, void *v)
{
- unsigned char *workarea;
- int len;
-
- len = 0;
-
- /* resp_buff is a page. Use the right half for a work area */
- workarea = resp_buff + 2000;
- len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
- ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
- len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
- ap_domain_index);
- len += sprintf(resp_buff + len, "Total device count: %d\n",
- zcrypt_device_count);
- len += sprintf(resp_buff + len, "PCICA count: %d\n",
- zcrypt_count_type(ZCRYPT_PCICA));
- len += sprintf(resp_buff + len, "PCICC count: %d\n",
- zcrypt_count_type(ZCRYPT_PCICC));
- len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
- len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
- len += sprintf(resp_buff + len, "CEX2C count: %d\n",
- zcrypt_count_type(ZCRYPT_CEX2C));
- len += sprintf(resp_buff + len, "CEX2A count: %d\n",
- zcrypt_count_type(ZCRYPT_CEX2A));
- len += sprintf(resp_buff + len, "CEX3C count: %d\n",
- zcrypt_count_type(ZCRYPT_CEX3C));
- len += sprintf(resp_buff + len, "CEX3A count: %d\n",
- zcrypt_count_type(ZCRYPT_CEX3A));
- len += sprintf(resp_buff + len, "requestq count: %d\n",
- zcrypt_requestq_count());
- len += sprintf(resp_buff + len, "pendingq count: %d\n",
- zcrypt_pendingq_count());
- len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
- atomic_read(&zcrypt_open_count));
+ char workarea[sizeof(int) * AP_DEVICES];
+
+ seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
+ ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
+ seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
+ seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
+ seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
+ seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
+ seq_printf(m, "PCIXCC MCL2 count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
+ seq_printf(m, "PCIXCC MCL3 count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
+ seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
+ seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
+ seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
+ seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
+ seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
+ seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
+ seq_printf(m, "Total open handles: %d\n\n",
+ atomic_read(&zcrypt_open_count));
zcrypt_status_mask(workarea);
- len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
- resp_buff+len, workarea, AP_DEVICES);
+ sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
+ "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
+ m, workarea, AP_DEVICES);
zcrypt_qdepth_mask(workarea);
- len += sprinthx("Waiting work element counts",
- resp_buff+len, workarea, AP_DEVICES);
+ sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
zcrypt_perdev_reqcnt((int *) workarea);
- len += sprinthx4("Per-device successfully completed request counts",
- resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
- *eof = 1;
- memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
- return len;
+ sprinthx4("Per-device successfully completed request counts",
+ m, (unsigned int *) workarea, AP_DEVICES);
+ return 0;
+}
+
+static int zcrypt_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zcrypt_proc_show, NULL);
}
static void zcrypt_disable_card(int index)
@@ -1061,11 +1041,11 @@ static void zcrypt_enable_card(int index)
spin_unlock_bh(&zcrypt_device_lock);
}
-static int zcrypt_status_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
unsigned char *lbuf, *ptr;
- unsigned long local_count;
+ size_t local_count;
int j;
if (count <= 0)
@@ -1115,6 +1095,15 @@ out:
return count;
}
+static const struct file_operations zcrypt_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = zcrypt_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = zcrypt_proc_write,
+};
+
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
@@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void)
goto out;
/* Set up the proc file system */
- zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
+ zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
if (!zcrypt_entry) {
rc = -ENOMEM;
goto out_misc;
}
- zcrypt_entry->data = NULL;
- zcrypt_entry->read_proc = zcrypt_status_read;
- zcrypt_entry->write_proc = zcrypt_status_write;
return 0;
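
The zcrypt proc interface above moves from the old read_proc/write_proc callbacks to the seq_file single_open() pattern registered via proc_create(). A minimal sketch of that pattern for a read-only entry; the demo_* names and the "driver/demo_status" path are invented:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* show() writes into a seq_file instead of a raw page buffer */
static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "demo status: %d\n", 42);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init demo_proc_init(void)
{
	if (!proc_create("driver/demo_status", 0444, NULL, &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

A write handler can still be supplied through the same file_operations, which is how zcrypt_proc_write is wired up above.
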
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2930fc763ac..b2fc4fd63f7 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code)
return;
/* The LSB might be overloaded, we have to mask it */
- vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL);
+ vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
/* We use the LSB of extparam to decide whether this interrupt is a config
* change or a "standard" interrupt */
- config_changed = (*(int *) __LC_EXT_PARAMS & 1);
+ config_changed = S390_lowcore.ext_params & 1;
if (config_changed) {
struct virtio_driver *drv;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b232693378c..a3ac4456e0b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -649,6 +649,7 @@ struct qeth_card_options {
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
+ int sniffer;
};
/*
@@ -737,6 +738,7 @@ struct qeth_card {
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
+ struct qdio_ssqd_desc ssqd;
};
struct qeth_card_list_struct {
@@ -811,7 +813,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
-int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
+int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
+ unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d34804d5ece..fa8a519218a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -269,6 +269,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
card->qdio.init_pool.buf_count = bufcnt;
return qeth_alloc_buffer_pool(card);
}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
static int qeth_issue_next_read(struct qeth_card *card)
{
@@ -350,8 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command < IPA_CMD_SETCCID ||
- cmd->hdr.command > IPA_CMD_MODCCID)
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd,
cmd->hdr.return_code, card);
return cmd;
@@ -1100,11 +1103,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->ip_list);
- card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
- return -ENOMEM;
- }
INIT_LIST_HEAD(card->ip_tbd_list);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -1138,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
if (!card)
- return NULL;
+ goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- if (qeth_setup_channel(&card->read)) {
- kfree(card);
- return NULL;
- }
- if (qeth_setup_channel(&card->write)) {
- qeth_clean_channel(&card->read);
- kfree(card);
- return NULL;
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!card->ip_tbd_list) {
+ QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
+ goto out_card;
}
+ if (qeth_setup_channel(&card->read))
+ goto out_ip;
+ if (qeth_setup_channel(&card->write))
+ goto out_channel;
card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
+
+out_channel:
+ qeth_clean_channel(&card->read);
+out_ip:
+ kfree(card->ip_tbd_list);
+out_card:
+ kfree(card);
+out:
+ return NULL;
}
static int qeth_determine_card_type(struct qeth_card *card)
@@ -1355,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
return ret;
}
-static int qeth_get_unitaddr(struct qeth_card *card)
+static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
- int length;
- char *prcd;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "getunit");
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
- return rc;
- }
+ QETH_DBF_TEXT(SETUP, 2, "cfgunit");
card->info.chpid = prcd[30];
card->info.unit_addr2 = prcd[31];
card->info.cula = prcd[63];
card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
(prcd[0x11] == _ascebc['M']));
- kfree(prcd);
- return 0;
+}
+
+static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+{
+ QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+
+ if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+ card->info.blkt.time_total = 250;
+ card->info.blkt.inter_packet = 5;
+ card->info.blkt.inter_packet_jumbo = 15;
+ } else {
+ card->info.blkt.time_total = 0;
+ card->info.blkt.inter_packet = 0;
+ card->info.blkt.inter_packet_jumbo = 0;
+ }
}
static void qeth_init_tokens(struct qeth_card *card)
@@ -2573,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
-int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
- const char *dbftext)
+int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
+ unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2584,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
- return 1;
+ if ((buf->element[15].flags & 0xff) == 0x12) {
+ card->stats.rx_dropped++;
+ return 0;
+ } else
+ return 1;
}
return 0;
}
@@ -2667,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
qdio_err = 1;
}
}
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
return QETH_SEND_ERROR_NONE;
@@ -3509,6 +3523,7 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
+ QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
card->stats.tx_errors++;
qeth_schedule_recovery(card);
@@ -3847,9 +3862,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
- struct qdio_ssqd_desc *ssqd;
int retries = 0;
- int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3882,31 +3895,6 @@ retriable:
else
goto retry;
}
-
- rc = qeth_get_unitaddr(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- return rc;
- }
-
- ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
- if (!ssqd) {
- rc = -ENOMEM;
- goto out;
- }
- rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
- if (rc == 0)
- mpno = ssqd->pcnt;
- kfree(ssqd);
-
- if (mpno)
- mpno = min(mpno - 1, QETH_MAX_PORTNO);
- if (card->info.portno > mpno) {
- QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
- "\n.", CARD_BUS_ID(card), card->info.portno);
- rc = -ENODEV;
- goto out;
- }
qeth_init_tokens(card);
qeth_init_func_level(card);
rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3990,7 +3978,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
int offset = *__offset;
struct sk_buff *skb = NULL;
- int skb_len;
+ int skb_len = 0;
void *data_ptr;
int data_len;
int headroom = 0;
@@ -4009,20 +3997,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
*hdr = element->addr + offset;
offset += sizeof(struct qeth_hdr);
- if (card->options.layer2) {
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- skb_len = (*hdr)->hdr.osn.pdu_length;
- headroom = sizeof(struct qeth_hdr);
- } else {
- skb_len = (*hdr)->hdr.l2.pkt_length;
- }
- } else {
+ switch ((*hdr)->hdr.l2.id) {
+ case QETH_HEADER_TYPE_LAYER2:
+ skb_len = (*hdr)->hdr.l2.pkt_length;
+ break;
+ case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR))
headroom = TR_HLEN;
else
headroom = ETH_HLEN;
+ break;
+ case QETH_HEADER_TYPE_OSN:
+ skb_len = (*hdr)->hdr.osn.pdu_length;
+ headroom = sizeof(struct qeth_hdr);
+ break;
+ default:
+ break;
}
if (!skb_len)
@@ -4177,6 +4169,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline.ccwgdriver = NULL;
}
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+ int rc;
+ int length;
+ char *prcd;
+
+ QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ rc = ccw_device_set_online(CARD_DDEV(card));
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ goto out;
+ }
+
+
+ rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+ dev_name(&card->gdev->dev), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ goto out_offline;
+ }
+ qeth_configure_unitaddr(card, prcd);
+ qeth_configure_blkt_default(card, prcd);
+ kfree(prcd);
+
+ rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+ ccw_device_set_offline(CARD_DDEV(card));
+out:
+ return;
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -4242,6 +4269,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_add_tail(&card->list, &qeth_core_card_list.list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+ qeth_determine_capabilities(card);
return 0;
err_card:
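
The reworked qeth_alloc_card() earlier in this file replaces the repeated cleanup-and-return branches with goto-based unwinding in reverse allocation order. A self-contained userspace illustration of the same pattern, with invented demo_* names and malloc standing in for the real allocations:

#include <stdlib.h>

struct demo_card {
	void *tbd_list;
	void *read_ch;
	void *write_ch;
};

/* Acquire resources in order; on failure, release in reverse order. */
static struct demo_card *demo_alloc_card(void)
{
	struct demo_card *card = calloc(1, sizeof(*card));

	if (!card)
		goto out;
	card->tbd_list = malloc(64);
	if (!card->tbd_list)
		goto out_card;
	card->read_ch = malloc(64);
	if (!card->read_ch)
		goto out_list;
	card->write_ch = malloc(64);
	if (!card->write_ch)
		goto out_read;
	return card;

out_read:
	free(card->read_ch);
out_list:
	free(card->tbd_list);
out_card:
	free(card);
out:
	return NULL;
}
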
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ba51152f66..104a3351e02 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
IPA_RC_IP_TABLE_FULL = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
+ IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
+ IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_FFFF = 0xffff
};
+/* for DELIP */
+#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
+/* for SET_DIAGNOSTIC_ASSIST */
+#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
+#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
@@ -246,6 +253,7 @@ enum qeth_ipa_setadp_cmd {
IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
+ IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
};
enum qeth_ipa_mac_ops {
@@ -424,6 +432,40 @@ struct qeth_create_destroy_address {
__u8 unique_id[8];
} __attribute__ ((packed));
+/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
+
+enum qeth_diags_cmds {
+ QETH_DIAGS_CMD_QUERY = 0x0001,
+ QETH_DIAGS_CMD_TRAP = 0x0002,
+ QETH_DIAGS_CMD_TRACE = 0x0004,
+ QETH_DIAGS_CMD_NOLOG = 0x0008,
+ QETH_DIAGS_CMD_DUMP = 0x0010,
+};
+
+enum qeth_diags_trace_types {
+ QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
+};
+
+enum qeth_diags_trace_cmds {
+ QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
+ QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
+ QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
+ QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
+ QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
+};
+
+struct qeth_ipacmd_diagass {
+ __u32 host_tod2;
+ __u32:32;
+ __u16 subcmd_len;
+ __u16:16;
+ __u32 subcmd;
+ __u8 type;
+ __u8 action;
+ __u16 options;
+ __u32:32;
+} __attribute__ ((packed));
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -452,6 +494,7 @@ struct qeth_ipa_cmd {
struct qeth_create_destroy_address create_destroy_addr;
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
+ struct qeth_ipacmd_diagass diagass;
} data;
} __attribute__ ((packed));
@@ -469,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-
extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
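
struct qeth_ipacmd_diagass above uses unnamed bit-fields (__u32:32, __u16:16) as reserved words, so the packed layout follows the architected command format without introducing field names that are never read. A small hypothetical example in the same style:

#include <stdint.h>

/* Hypothetical wire header: unnamed bit-fields mark reserved words,
 * __attribute__((packed)) prevents compiler-inserted padding. */
struct demo_wire_hdr {
	uint32_t token;
	uint32_t :32;		/* reserved */
	uint16_t payload_len;
	uint16_t :16;		/* reserved */
	uint32_t subcmd;
} __attribute__ ((packed));
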
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9ff2b36fdc4..88ae4357136 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -118,7 +118,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- unsigned int portno;
+ unsigned int portno, limit;
if (!card)
return -EINVAL;
@@ -128,9 +128,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
return -EPERM;
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+ limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
+ if (portno > limit)
return -EINVAL;
- }
card->info.portno = portno;
return count;
@@ -537,7 +539,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.time_total, 1000);
+ &card->info.blkt.time_total, 5000);
}
@@ -559,7 +561,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet, 100);
+ &card->info.blkt.inter_packet, 1000);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -580,7 +582,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet_jumbo, 100);
+ &card->info.blkt.inter_packet_jumbo, 1000);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
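
With the SSQD data now cached in card->ssqd (filled by qeth_determine_capabilities() above), the port-number check happens at store time: pcnt reports the number of available ports, and a pcnt of 0 leaves only port 0 usable. A sketch of the limit derivation, with an invented demo name:

/* Ports 0 .. pcnt-1 are valid when pcnt is reported; 0 means only port 0. */
static unsigned int demo_portno_limit(unsigned int pcnt)
{
	return pcnt ? pcnt - 1 : 0;
}
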
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d..51fde6f2e0b 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "already exists\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM already exists\n",
+ card->dev->dev_addr);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "is not authorized\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM is not authorized\n",
+ card->dev->dev_addr);
break;
default:
break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
OSA_ADDR_LEN);
dev_info(&card->gdev->dev,
- "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "successfully registered on device %s\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5],
- card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
}
return 0;
}
@@ -634,7 +622,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
for (dm = dev->mc_list; dm; dm = dm->next)
qeth_l2_add_mc(card, dm->da_addr, 0);
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
@@ -781,7 +769,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
+ "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -938,7 +927,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
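
The setmac callback messages above drop the hand-rolled %2.2x:...:%2.2x formatting in favour of the %pM printk extension, which renders a 6-byte buffer as a colon-separated MAC address. A minimal sketch of its use; demo_print_mac is an invented helper:

#include <linux/kernel.h>

/* %pM consumes a pointer to 6 bytes and prints aa:bb:cc:dd:ee:ff */
static void demo_print_mac(const unsigned char *addr)
{
	pr_info("MAC address %pM registered\n", addr);
}
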
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 321988fa9f7..8447d233d0b 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
#include "qeth_core.h"
+#define QETH_SNIFF_AVAIL 0x0008
+
struct qeth_ipaddr {
struct list_head entry;
enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd1b6ed3721..5475834ab91 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -242,6 +242,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
struct qeth_ipaddr *tmp, *t;
int found = 0;
+ if (card->options.sniffer)
+ return 0;
list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
(tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -457,6 +459,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 2, "sdiplist");
QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ if (card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -495,7 +499,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
spin_unlock_irqrestore(&card->ip_lock, flags);
rc = qeth_l3_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, flags);
- if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
+ if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
kfree(addr);
else
list_add_tail(&addr->entry, &card->ip_list);
@@ -513,6 +517,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
unsigned long flags;
QETH_DBF_TEXT(TRACE, 4, "clearip");
+ if (recover && card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
/* clear todo list */
list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1674,6 +1680,76 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
return rc;
}
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ __u16 rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+ cmd = (struct qeth_ipa_cmd *)data;
+ rc = cmd->hdr.return_code;
+ if (rc) {
+ QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ if (cmd->data.diagass.action == QETH_DIAGS_CMD_TRACE_ENABLE) {
+ switch (rc) {
+ case IPA_RC_HARDWARE_AUTH_ERROR:
+ dev_warn(&card->gdev->dev, "The device is not "
+ "authorized to run as a HiperSockets "
+ "network traffic analyzer\n");
+ break;
+ case IPA_RC_TRACE_ALREADY_ACTIVE:
+ dev_warn(&card->gdev->dev, "A HiperSockets "
+ "network traffic analyzer is already "
+ "active in the HiperSockets LAN\n");
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+ }
+
+ switch (cmd->data.diagass.action) {
+ case QETH_DIAGS_CMD_TRACE_QUERY:
+ break;
+ case QETH_DIAGS_CMD_TRACE_DISABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is deactivated\n");
+ break;
+ case QETH_DIAGS_CMD_TRACE_ENABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_ON;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is activated\n");
+ break;
+ default:
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+ cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ }
+
+ return 0;
+}
+
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.diagass.subcmd_len = 16;
+ cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+ cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+ cmd->data.diagass.action = diags_cmd;
+ return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
struct net_device *dev)
{
@@ -1951,7 +2027,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
case QETH_CAST_ANYCAST:
case QETH_CAST_NOCAST:
default:
- skb->pkt_type = PACKET_HOST;
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ skb->pkt_type = PACKET_HOST;
memcpy(tg_addr, card->dev->dev_addr,
card->dev->addr_len);
}
@@ -2007,7 +2086,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
int offset;
__u16 vlan_tag = 0;
unsigned int len;
-
/* get first element of current buffer */
element = (struct qdio_buffer_element *)&buf->buffer->element[0];
offset = 0;
@@ -2026,7 +2104,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER3:
vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
len = skb->len;
- if (vlan_tag)
+ if (vlan_tag && !card->options.sniffer)
if (card->vlangrp)
vlan_hwaccel_rx(skb, card->vlangrp,
vlan_tag);
@@ -2037,6 +2115,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
else
netif_rx(skb);
break;
+ case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (card->options.checksum_type == NO_CHECKSUMMING)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ netif_receive_skb(skb);
+ break;
default:
dev_kfree_skb_any(skb);
QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2118,6 +2206,9 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
@@ -2162,6 +2253,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
return rc;
}
+/*
+ * Test for and switch promiscuous mode (on or off),
+ * either for guestlan or HiperSockets sniffer.
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+
+ if (((dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+ (!(dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+ return;
+
+ if (card->info.guestlan) { /* Guestlan trace */
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ qeth_setadp_promisc_mode(card);
+ } else if (card->options.sniffer && /* HiperSockets trace */
+ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ if (dev->flags & IFF_PROMISC) {
+ QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+ } else {
+ QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+ }
+ }
+}
+
static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -2170,15 +2291,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
- qeth_l3_delete_mc_addresses(card);
- qeth_l3_add_multicast_ipv4(card);
+ if (!card->options.sniffer) {
+ qeth_l3_delete_mc_addresses(card);
+ qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
- qeth_l3_add_multicast_ipv6(card);
+ qeth_l3_add_multicast_ipv6(card);
#endif
- qeth_l3_set_ip_addr_list(card);
- if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- return;
- qeth_setadp_promisc_mode(card);
+ qeth_l3_set_ip_addr_list(card);
+ if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ return;
+ }
+ qeth_l3_handle_promisc_mode(card);
}
static const char *qeth_l3_arp_get_error_cause(int *rc)
@@ -2778,8 +2901,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int nr_frags;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (skb->protocol != htons(ETH_P_IPV6)) &&
- (skb->protocol != htons(ETH_P_IP)))
+ (((skb->protocol != htons(ETH_P_IPV6)) &&
+ (skb->protocol != htons(ETH_P_IP))) ||
+ card->options.sniffer))
goto tx_drop;
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -3155,7 +3279,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer,
+ qeth_check_qdio_errors(card, buffer->buffer,
qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
@@ -3214,8 +3338,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
-
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
@@ -3250,20 +3372,22 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
} else
card->lan_online = 1;
- qeth_l3_set_large_send(card, card->options.large_send);
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- rc = qeth_l3_start_ipassists(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- rc = qeth_l3_setrouting_v4(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
- rc = qeth_l3_setrouting_v6(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ if (!card->options.sniffer) {
+ rc = qeth_l3_start_ipassists(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ qeth_l3_set_large_send(card, card->options.large_send);
+ rc = qeth_l3_setrouting_v4(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ rc = qeth_l3_setrouting_v6(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ }
netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 3360b0941aa..3f08b11274a 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -319,6 +319,61 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
qeth_l3_dev_checksum_store);
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int ret;
+ unsigned long i;
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ return -EPERM;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 16, &i);
+ if (ret)
+ return -EINVAL;
+ switch (i) {
+ case 0:
+ card->options.sniffer = i;
+ break;
+ case 1:
+ ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+ card->options.sniffer = i;
+ if (card->qdio.init_pool.buf_count !=
+ QETH_IN_BUF_COUNT_MAX)
+ qeth_realloc_buffer_pool(card,
+ QETH_IN_BUF_COUNT_MAX);
+ break;
+ } else
+ return -EPERM;
+ default: /* fall through */
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+ qeth_l3_dev_sniffer_store);
+
static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -373,6 +428,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_broadcast_mode.attr,
&dev_attr_canonical_macaddr.attr,
&dev_attr_checksumming.attr,
+ &dev_attr_sniffer.attr,
&dev_attr_large_send.attr,
NULL,
};
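
The new sniffer attribute follows the usual DEVICE_ATTR show/store shape: parse the value, validate card state and hardware capability (QETH_SNIFF_AVAIL in the SSQD qdioac2 flags), then apply it. A generic sketch of such an attribute; the demo_* names are invented and the hardware checks are reduced to a simple range test:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 16, &val))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;
	/* apply the new setting to the driver's private data here */
	return count;
}

static DEVICE_ATTR(demo, 0644, demo_show, demo_store);

Once the attribute group containing dev_attr_sniffer is registered, HiperSockets tracing is toggled from user space by writing 0 or 1 to the device's sniffer file while the qeth device is not online.
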
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9d0c941b7d3..66d6c01fcf3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
/*
@@ -32,6 +32,7 @@
#include <linux/seq_file.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -49,36 +50,6 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
-{
- int idx;
-
- adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
- GFP_KERNEL);
- if (!adapter->req_list)
- return -ENOMEM;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- INIT_LIST_HEAD(&adapter->req_list[idx]);
- return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- if (!list_empty(&adapter->req_list[idx]))
- return 0;
- return 1;
-}
-
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct ccw_device *cdev;
@@ -110,7 +81,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
flush_work(&unit->scsi_work);
out_unit:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
@@ -255,7 +226,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
read_lock_irqsave(&port->unit_list_lock, flags);
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
- if (!get_device(&unit->sysfs_device))
+ if (!get_device(&unit->dev))
unit = NULL;
read_unlock_irqrestore(&port->unit_list_lock, flags);
return unit;
@@ -280,7 +251,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
- if (!get_device(&port->sysfs_device))
+ if (!get_device(&port->dev))
port = NULL;
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
@@ -298,10 +269,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
*/
static void zfcp_unit_release(struct device *dev)
{
- struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
- sysfs_device);
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
- put_device(&unit->port->sysfs_device);
+ put_device(&unit->port->dev);
kfree(unit);
}
@@ -318,11 +288,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
struct zfcp_unit *unit;
int retval = -ENOMEM;
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit) {
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
retval = -EEXIST;
goto err_out;
}
@@ -333,10 +303,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
- unit->sysfs_device.parent = &port->sysfs_device;
- unit->sysfs_device.release = zfcp_unit_release;
+ unit->dev.parent = &port->dev;
+ unit->dev.release = zfcp_unit_release;
- if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+ if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
goto err_out;
@@ -353,13 +323,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
- if (device_register(&unit->sysfs_device)) {
- put_device(&unit->sysfs_device);
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
goto err_out;
}
- if (sysfs_create_group(&unit->sysfs_device.kobj,
- &zfcp_sysfs_unit_attrs))
+ if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
goto err_out_put;
write_lock_irq(&port->unit_list_lock);
@@ -371,9 +340,9 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
return unit;
err_out_put:
- device_unregister(&unit->sysfs_device);
+ device_unregister(&unit->dev);
err_out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return ERR_PTR(retval);
}
@@ -539,7 +508,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed;
- if (zfcp_reqlist_alloc(adapter))
+ adapter->req_list = zfcp_reqlist_alloc();
+ if (!adapter->req_list)
goto failed;
if (zfcp_dbf_adapter_register(adapter))
@@ -560,8 +530,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
- spin_lock_init(&adapter->req_list_lock);
-
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@@ -640,8 +608,7 @@ void zfcp_device_unregister(struct device *dev,
static void zfcp_port_release(struct device *dev)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
zfcp_ccw_adapter_put(port->adapter);
kfree(port);
@@ -669,7 +636,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
@@ -689,22 +656,21 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
- port->sysfs_device.parent = &adapter->ccw_device->dev;
- port->sysfs_device.release = zfcp_port_release;
+ port->dev.parent = &adapter->ccw_device->dev;
+ port->dev.release = zfcp_port_release;
- if (dev_set_name(&port->sysfs_device, "0x%016llx",
- (unsigned long long)wwpn)) {
+ if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
- if (device_register(&port->sysfs_device)) {
- put_device(&port->sysfs_device);
+ if (device_register(&port->dev)) {
+ put_device(&port->dev);
goto err_out;
}
- if (sysfs_create_group(&port->sysfs_device.kobj,
+ if (sysfs_create_group(&port->dev.kobj,
&zfcp_sysfs_port_attrs))
goto err_out_put;
@@ -717,7 +683,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
err_out_put:
- device_unregister(&port->sysfs_device);
+ device_unregister(&port->dev);
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index c22cb72a5ae..ce1cc7a11fb 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,13 +3,14 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
#define ZFCP_MODEL_PRIV 0x4
@@ -122,12 +123,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
- zfcp_device_unregister(&unit->sysfs_device,
- &zfcp_sysfs_unit_attrs);
+ zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
list_for_each_entry_safe(port, p, &port_remove_lh, list)
- zfcp_device_unregister(&port->sysfs_device,
- &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
zfcp_adapter_unregister(adapter);
}
@@ -162,7 +161,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
}
/* initialize request counter */
- BUG_ON(!zfcp_reqlist_isempty(adapter));
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 7369c8911bc..7a149fd85f6 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -140,9 +140,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
memcpy(response->fsf_status_qual,
fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
response->fsf_req_status = fsf_req->status;
- response->sbal_first = fsf_req->queue_req.sbal_first;
- response->sbal_last = fsf_req->queue_req.sbal_last;
- response->sbal_response = fsf_req->queue_req.sbal_response;
+ response->sbal_first = fsf_req->qdio_req.sbal_first;
+ response->sbal_last = fsf_req->qdio_req.sbal_last;
+ response->sbal_response = fsf_req->qdio_req.sbal_response;
response->pool = fsf_req->pool != NULL;
response->erp_action = (unsigned long)fsf_req->erp_action;
@@ -576,7 +576,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
struct zfcp_adapter *adapter = dbf->adapter;
zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
- &adapter->erp_counter, 0, 0, 0);
+ &adapter->erp_counter, 0, 0,
+ ZFCP_DBF_INVALID_LUN);
}
/**
@@ -590,8 +591,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
struct zfcp_dbf *dbf = port->adapter->dbf;
zfcp_dbf_rec_target(id, ref, dbf, &port->status,
- &port->erp_counter, port->wwpn, port->d_id,
- 0);
+ &port->erp_counter, port->wwpn, port->d_id,
+ ZFCP_DBF_INVALID_LUN);
}
/**
@@ -642,10 +643,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
r->u.trigger.ps = atomic_read(&port->status);
r->u.trigger.wwpn = port->wwpn;
}
- if (unit) {
+ if (unit)
r->u.trigger.us = atomic_read(&unit->status);
- r->u.trigger.fcp_lun = unit->fcp_lun;
- }
+ r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
@@ -668,7 +668,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
r->u.action.action = (unsigned long)erp_action;
r->u.action.status = erp_action->status;
r->u.action.step = erp_action->step;
- r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
+ r->u.action.fsf_req = erp_action->fsf_req_id;
debug_event(dbf->rec, 5, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 8b7fd9a1033..457e046f2d2 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -30,6 +30,8 @@
#define ZFCP_DBF_TAG_SIZE 4
#define ZFCP_DBF_ID_SIZE 7
+#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+
struct zfcp_dbf_dump {
u8 tag[ZFCP_DBF_TAG_SIZE];
u32 total_size; /* size of total dump data */
@@ -192,10 +194,10 @@ struct zfcp_dbf_san_record {
struct zfcp_dbf_san_record_ct_response ct_resp;
struct zfcp_dbf_san_record_els els;
} u;
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
- u8 payload[32];
} __attribute__ ((packed));
+#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+
struct zfcp_dbf_scsi_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -301,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
/**
* zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
+ * @dbf: adapter dbf trace
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
+ struct zfcp_fsf_req *req)
+{
+ if (scmd->result != 0)
+ zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+ else if (scmd->retries > 0)
+ zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+ else
+ zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @dbf: adapter dbf trace
* @scmd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
*/
static inline
-void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
- struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
{
- zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
+ zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
}
/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1b5b88e2dd..7131c7db1f0 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
-#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_fsf.h"
+#include "zfcp_qdio.h"
-/********************* GENERAL DEFINES *********************************/
-
-#define REQUEST_LIST_SIZE 128
+struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
@@ -129,12 +127,6 @@ struct zfcp_adapter_mempool {
mempool_t *qtcb_pool;
};
-struct zfcp_qdio_queue {
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
- u8 first; /* index of next free bfr in queue */
- atomic_t count; /* number of free buffers in queue */
-};
-
struct zfcp_erp_action {
struct list_head list;
int action; /* requested action code */
@@ -143,8 +135,7 @@ struct zfcp_erp_action {
struct zfcp_unit *unit;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
- struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
- for this action */
+ unsigned long fsf_req_id;
struct timer_list timer;
};
@@ -167,29 +158,6 @@ struct zfcp_latencies {
spinlock_t lock;
};
-/** struct zfcp_qdio - basic QDIO data structure
- * @resp_q: response queue
- * @req_q: request queue
- * @stat_lock: lock to protect req_q_util and req_q_time
- * @req_q_lock; lock to serialize access to request queue
- * @req_q_time: time of last fill level change
- * @req_q_util: used for accounting
- * @req_q_full: queue full incidents
- * @req_q_wq: used to wait for SBAL availability
- * @adapter: adapter used in conjunction with this QDIO structure
- */
-struct zfcp_qdio {
- struct zfcp_qdio_queue resp_q;
- struct zfcp_qdio_queue req_q;
- spinlock_t stat_lock;
- spinlock_t req_q_lock;
- unsigned long long req_q_time;
- u64 req_q_util;
- atomic_t req_q_full;
- wait_queue_head_t req_q_wq;
- struct zfcp_adapter *adapter;
-};
-
struct zfcp_adapter {
struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
@@ -207,8 +175,7 @@ struct zfcp_adapter {
struct list_head port_list; /* remote port list */
rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
- struct list_head *req_list; /* list of pending reqs */
- spinlock_t req_list_lock; /* request list lock */
+ struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
@@ -241,7 +208,7 @@ struct zfcp_adapter {
};
struct zfcp_port {
- struct device sysfs_device; /* sysfs device */
+ struct device dev;
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
struct zfcp_adapter *adapter; /* adapter used to access port */
@@ -263,7 +230,7 @@ struct zfcp_port {
};
struct zfcp_unit {
- struct device sysfs_device; /* sysfs device */
+ struct device dev;
struct list_head list; /* list of logical units */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
@@ -277,33 +244,11 @@ struct zfcp_unit {
};
/**
- * struct zfcp_queue_req - queue related values for a request
- * @sbal_number: number of free SBALs
- * @sbal_first: first SBAL for this request
- * @sbal_last: last SBAL for this request
- * @sbal_limit: last possible SBAL for this request
- * @sbale_curr: current SBALE at creation of this request
- * @sbal_response: SBAL used in interrupt
- * @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
- */
-struct zfcp_queue_req {
- u8 sbal_number;
- u8 sbal_first;
- u8 sbal_last;
- u8 sbal_limit;
- u8 sbale_curr;
- u8 sbal_response;
- u16 qdio_outb_usage;
- u16 qdio_inb_usage;
-};
-
-/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
* @req_id: unique request ID
* @adapter: adapter this request belongs to
- * @queue_req: queue related values
+ * @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
* @fsf_command: FSF command issued
@@ -321,7 +266,7 @@ struct zfcp_fsf_req {
struct list_head list;
unsigned long req_id;
struct zfcp_adapter *adapter;
- struct zfcp_queue_req queue_req;
+ struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
u32 fsf_command;
@@ -352,45 +297,4 @@ struct zfcp_data {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
-/*
- * Helper functions for request ID management.
- */
-static inline int zfcp_reqlist_hash(unsigned long req_id)
-{
- return req_id % REQUEST_LIST_SIZE;
-}
-
-static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
- struct zfcp_fsf_req *fsf_req)
-{
- list_del(&fsf_req->list);
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- idx = zfcp_reqlist_hash(req_id);
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request->req_id == req_id)
- return request;
- return NULL;
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request == req)
- return request;
- }
- return NULL;
-}
-
#endif /* ZFCP_DEF_H */
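For orientation, a minimal sketch of how a caller looks up a pending request after this change; it is not part of the patch, and example_find_pending() is an illustrative name (the real callers are updated in the hunks below):

static struct zfcp_fsf_req *example_find_pending(struct zfcp_adapter *adapter,
						 unsigned long req_id)
{
	/* illustrative sketch, not part of the zfcp driver */
	/*
	 * The removed open-coded hash walk under adapter->req_list_lock is
	 * replaced by zfcp_reqlist_find(), which takes the reqlist lock
	 * internally (see the new zfcp_reqlist.h further down).
	 */
	return zfcp_reqlist_find(adapter->req_list, req_id);
}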
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index b51a11a82e6..0be5e7ea282 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
#define ZFCP_MAX_ERPS 3
@@ -174,7 +175,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if (!get_device(&unit->sysfs_device))
+ if (!get_device(&unit->dev))
return NULL;
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
erp_action = &unit->erp_action;
@@ -184,7 +185,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- if (!get_device(&port->sysfs_device))
+ if (!get_device(&port->dev))
return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
@@ -478,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
+ struct zfcp_fsf_req *req;
- if (!act->fsf_req)
+ if (!act->fsf_req_id)
return;
- spin_lock(&adapter->req_list_lock);
- if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
- act->fsf_req->erp_action == act) {
+ spin_lock(&adapter->req_list->lock);
+ req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+ if (req && req->erp_action == act) {
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
- act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_action("erscf_1", act);
- act->fsf_req->erp_action = NULL;
+ req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_action("erscf_2", act);
- if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
- act->fsf_req = NULL;
+ if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ act->fsf_req_id = 0;
} else
- act->fsf_req = NULL;
- spin_unlock(&adapter->req_list_lock);
+ act->fsf_req_id = 0;
+ spin_unlock(&adapter->req_list->lock);
}
/**
@@ -1179,19 +1181,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
- get_device(&unit->sysfs_device);
+ get_device(&unit->dev);
if (scsi_queue_work(unit->port->adapter->scsi_host,
&unit->scsi_work) <= 0)
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 66bdb34143c..8786a79c7f8 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -21,7 +21,6 @@ extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_device_unregister(struct device *,
@@ -144,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
-extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
- *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
- *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_queue_req *, unsigned long,
+ struct zfcp_qdio_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 271399f62f1..5219670f0c9 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -316,7 +316,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -325,9 +325,9 @@ out:
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -389,7 +389,7 @@ static void zfcp_fc_adisc_handler(void *data)
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
kmem_cache_free(zfcp_data.adisc_cache, adisc);
}
@@ -436,7 +436,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@@ -455,7 +455,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -468,9 +468,9 @@ out:
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
@@ -617,8 +617,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
list_for_each_entry_safe(port, tmp, &remove_lh, list) {
zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
- zfcp_device_unregister(&port->sysfs_device,
- &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
}
return ret;
@@ -731,7 +730,7 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
return -EINVAL;
d_id = port->d_id;
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e8fb4d9baa8..6538742b421 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,8 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
@@ -393,7 +395,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
case FSF_PROT_LINK_DOWN:
zfcp_fsf_link_down_info_eval(req, "fspse_5",
&psq->link_down_info);
- /* FIXME: reopening adapter now? better wait for link up */
+ /* go through reopen to flush pending requests */
zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
break;
case FSF_PROT_REEST_QUEUE:
@@ -457,15 +459,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *req, *tmp;
- unsigned long flags;
LIST_HEAD(remove_queue);
- unsigned int i;
BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- for (i = 0; i < REQUEST_LIST_SIZE; i++)
- list_splice_init(&adapter->req_list[i], &remove_queue);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_move(adapter->req_list, &remove_queue);
list_for_each_entry_safe(req, tmp, &remove_queue, list) {
list_del(&req->list);
@@ -495,8 +492,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) = bottom->fc_link_speed;
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
- fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
- fc_host_active_fc4s(shost)[2] = 1; /* FCP */
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
@@ -619,6 +614,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) = bottom->supported_speed;
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -725,12 +724,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->queue_req.sbal_number = 1;
- req->queue_req.sbal_first = req_q->first;
- req->queue_req.sbal_last = req_q->first;
- req->queue_req.sbale_curr = 1;
+ req->qdio_req.sbal_number = 1;
+ req->qdio_req.sbal_first = req_q->first;
+ req->qdio_req.sbal_last = req_q->first;
+ req->qdio_req.sbale_curr = 1;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].addr = (void *) req->req_id;
sbale[0].flags |= SBAL_FLAGS0_COMMAND;
@@ -745,6 +744,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
return ERR_PTR(-ENOMEM);
}
+ req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
@@ -752,8 +752,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- req->seq_no = adapter->fsf_req_seq_no;
- req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
sbale[1].addr = (void *) req->qtcb;
sbale[1].length = sizeof(struct fsf_qtcb);
}
@@ -770,25 +768,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- unsigned long flags;
- int idx;
- int with_qtcb = (req->qtcb != NULL);
+ int with_qtcb = (req->qtcb != NULL);
+ int req_id = req->req_id;
- /* put allocated FSF request into hash table */
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- idx = zfcp_reqlist_hash(req->req_id);
- list_add_tail(&req->list, &adapter->req_list[idx]);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_add(adapter->req_list, req);
- req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
req->issued = get_clock();
- if (zfcp_qdio_send(qdio, &req->queue_req)) {
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
/* lookup request again, list might have changed */
- if (zfcp_reqlist_find_safe(adapter, req))
- zfcp_reqlist_remove(adapter, req);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
return -EIO;
}
@@ -826,9 +816,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->queue_req.sbale_curr = 2;
+ req->qdio_req.sbale_curr = 2;
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
@@ -837,7 +827,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
sbale->addr = (void *) sr_buf;
sbale->length = sizeof(*sr_buf);
@@ -934,7 +924,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1029,7 +1019,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
{
struct zfcp_adapter *adapter = req->adapter;
struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->queue_req);
+ &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
@@ -1047,15 +1037,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
return 0;
}
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1251,7 +1241,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1262,13 +1252,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
FSF_FEATURE_UPDATE_ALERT;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1293,7 +1283,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1349,19 +1339,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1398,7 +1388,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1484,7 +1474,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
}
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -1513,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1521,15 +1511,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
hton24(req->qtcb->bottom.support.d_id, port->d_id);
req->data = port;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
- get_device(&port->sysfs_device);
+ erp_action->fsf_req_id = req->req_id;
+ get_device(&port->dev);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
- put_device(&port->sysfs_device);
+ erp_action->fsf_req_id = 0;
+ put_device(&port->dev);
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1583,7 +1573,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1591,13 +1581,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
req->data = erp_action->port;
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1660,7 +1650,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1715,7 +1705,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1809,7 +1799,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1817,13 +1807,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
req->qtcb->header.port_handle = erp_action->port->handle;
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1982,7 +1972,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1991,7 +1981,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_open_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2000,7 +1990,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -2068,7 +2058,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2077,13 +2067,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_close_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -2111,8 +2101,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
- blktrc.inb_usage = req->queue_req.qdio_inb_usage;
- blktrc.outb_usage = req->queue_req.qdio_outb_usage;
+ blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+ blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
blktrc.flags |= ZFCP_BLK_LAT_VALID;
@@ -2169,12 +2159,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
zfcp_fsf_req_trace(req, scpnt);
skip_fsfstatus:
- if (scpnt->result != 0)
- zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
- else if (scpnt->retries > 0)
- zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
- else
- zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
+ zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
scpnt->host_scribble = NULL;
(scpnt->scsi_done) (scpnt);
@@ -2274,7 +2259,7 @@ skip_fsfstatus:
else {
zfcp_fsf_send_fcp_command_task_handler(req);
req->unit = NULL;
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
}
@@ -2312,7 +2297,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->sysfs_device);
+ get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2346,11 +2331,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
scsi_sglist(scsi_cmnd),
FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2369,7 +2354,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
failed_scsi_cmnd:
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
@@ -2415,7 +2400,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2478,14 +2463,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= direction;
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
- bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
direction, fsf_cfdc->sg,
FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2516,15 +2501,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
- unsigned long flags, req_id;
+ unsigned long req_id;
int idx;
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
req_id = (unsigned long) sbale->addr;
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- fsf_req = zfcp_reqlist_find(adapter, req_id);
+ fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req)
/*
@@ -2534,11 +2518,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
panic("error: unknown req_id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
- list_del(&fsf_req->list);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
- fsf_req->queue_req.sbal_response = sbal_idx;
- fsf_req->queue_req.qdio_inb_usage =
+ fsf_req->qdio_req.sbal_response = sbal_idx;
+ fsf_req->qdio_req.qdio_inb_usage =
atomic_read(&qdio->resp_q.count);
zfcp_fsf_req_complete(fsf_req);
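The hunks above depend on the request id round trip: zfcp_fsf_req_create() stores req->req_id in the first SBALE, and zfcp_fsf_reqid_check() reads it back from the response SBALE to find the pending request again. A minimal sketch of the receive side, assuming the request was added to adapter->req_list before it was sent; example_recover_req() is an illustrative name:

static struct zfcp_fsf_req *example_recover_req(struct zfcp_adapter *adapter,
					struct qdio_buffer_element *sbale)
{
	/* illustrative sketch, not part of the zfcp driver */
	unsigned long req_id = (unsigned long) sbale->addr;

	/* find the pending request by id and unhook it in one locked step */
	return zfcp_reqlist_find_rm(adapter->req_list, req_id);
}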
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b627f..71b97ff77cf 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
+#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
@@ -28,12 +29,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
-static struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
- return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +101,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
if (unlikely(retval)) {
atomic_set(&queue->count, count);
- /* FIXME: Recover this with an adapter reopen? */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
} else {
queue->first += count;
queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +140,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
zfcp_qdio_resp_put_back(qdio, count);
}
-/**
- * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_queue_rec
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
-{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
-{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
- q_req->sbale_curr);
-}
-
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req, int max_sbals)
+ struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = atomic_read(&qdio->req_q.count);
count = min(count, max_sbals);
@@ -179,7 +150,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype)
{
struct qdio_buffer_element *sbale;
@@ -214,7 +185,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned int sbtype)
{
if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +195,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
+ struct zfcp_qdio_req *q_req)
{
struct qdio_buffer **sbal = qdio->req_q.sbal;
int first = q_req->sbal_first;
@@ -235,7 +206,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
}
static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req,
+ struct zfcp_qdio_req *q_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
@@ -271,8 +242,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
* @max_sbals: upper bound for number of SBALs to be used
* Returns: number of bytes, or error (negative)
*/
-int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req,
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype, struct scatterlist *sg,
int max_sbals)
{
@@ -304,10 +274,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
- * @q_req: pointer to struct zfcp_queue_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
-int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct zfcp_qdio_queue *req_q = &qdio->req_q;
int first = q_req->sbal_first;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 00000000000..8cca54631e1
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+/**
+ * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
+ * @sbal: qdio buffers
+ * @first: index of next free buffer in queue
+ * @count: number of free buffers in queue
+ */
+struct zfcp_qdio_queue {
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ u8 first;
+ atomic_t count;
+};
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @resp_q: response queue
+ * @req_q: request queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
+ */
+struct zfcp_qdio {
+ struct zfcp_qdio_queue resp_q;
+ struct zfcp_qdio_queue req_q;
+ spinlock_t stat_lock;
+ spinlock_t req_q_lock;
+ unsigned long long req_q_time;
+ u64 req_q_util;
+ atomic_t req_q_full;
+ wait_queue_head_t req_q_wq;
+ struct zfcp_adapter *adapter;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbal_number: number of sbals used for this request
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: current sbale at creation of this request
+ * @sbal_response: sbal used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ * @qdio_inb_usage: usage of inbound queue
+ */
+struct zfcp_qdio_req {
+ u8 sbal_number;
+ u8 sbal_first;
+ u8 sbal_last;
+ u8 sbal_limit;
+ u8 sbale_curr;
+ u8 sbal_response;
+ u16 qdio_outb_usage;
+ u16 qdio_inb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale - return pointer to sbale in qdio queue
+ * @q: queue in which to find the sbal
+ * @sbal_idx: sbal index in queue
+ * @sbale_idx: sbale index in sbal
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
+{
+ return &q->sbal[sbal_idx]->element[sbale_idx];
+}
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
+ q_req->sbale_curr);
+}
+
+#endif /* ZFCP_QDIO_H */
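A short usage sketch for the inlines above; it is not part of the patch, and example_fill_payload() and its parameters are illustrative, patterned on how zfcp_fsf.c fills its SBALEs:

static void example_fill_payload(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req,
				 void *data, u32 len)
{
	/* illustrative sketch, not part of the zfcp driver */
	struct qdio_buffer_element *sbale;

	/* SBALE 0 carries the command flag, as in zfcp_fsf_req_create() */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale[0].flags |= SBAL_FLAGS0_COMMAND;

	/* the current SBALE receives the payload buffer */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = data;
	sbale->length = len;
}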
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 00000000000..a72d1b730ab
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hash buckets; each one is a list of the requests hashed to it
+ */
+struct zfcp_reqlist {
+ spinlock_t lock;
+ struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+ return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+ unsigned int i;
+ struct zfcp_reqlist *rl;
+
+ rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+ if (!rl)
+ return NULL;
+
+ spin_lock_init(&rl->lock);
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ INIT_LIST_HEAD(&rl->buckets[i]);
+
+ return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ if (!list_empty(&rl->buckets[i]))
+ return 0;
+ return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist to free
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+ /* sanity check */
+ BUG_ON(!zfcp_reqlist_isempty(rl));
+
+ kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ struct zfcp_fsf_req *req;
+ unsigned int i;
+
+ i = zfcp_reqlist_hash(req_id);
+ list_for_each_entry(req, &rl->buckets[i], list)
+ if (req->req_id == req_id)
+ return req;
+ return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Look up an FSF request by its request id
+ * @rl: The reqlist in which to look up the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Look up a request by id and remove it from the reqlist
+ * @rl: The reqlist in which to search for and remove the entry
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ if (req)
+ list_del(&req->list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: The reqlist to which to add the entry
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+ struct zfcp_fsf_req *req)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ i = zfcp_reqlist_hash(req->req_id);
+
+ spin_lock_irqsave(&rl->lock, flags);
+ list_add_tail(&req->list, &rl->buckets[i]);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+/**
+ * zfcp_reqlist_move - Move all entries from the reqlist to a simple list
+ * @rl: The reqlist from which all entries are removed
+ * @list: The list to which the entries are moved
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+ struct list_head *list)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_splice_init(&rl->buckets[i], list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+#endif /* ZFCP_REQLIST_H */
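A brief lifecycle sketch for the reqlist API above; it is not part of the patch, the example_* names are made up, and it only assumes what the zfcp_def.h hunk shows, namely one reqlist per adapter in adapter->req_list:

static int example_adapter_init(struct zfcp_adapter *adapter)
{
	/* illustrative sketch, not part of the zfcp driver */
	adapter->req_list = zfcp_reqlist_alloc();
	if (!adapter->req_list)
		return -ENOMEM;
	return 0;
}

static void example_adapter_exit(struct zfcp_adapter *adapter)
{
	/* zfcp_reqlist_free() BUG()s if requests are still pending */
	WARN_ON(!zfcp_reqlist_isempty(adapter->req_list));
	zfcp_reqlist_free(adapter->req_list);
	adapter->req_list = NULL;
}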
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 8e6fc68d6bd..c3c4178888a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -15,6 +15,7 @@
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
@@ -43,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
unit->device = NULL;
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -59,10 +60,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
+
set_host_byte(scpnt, result);
- if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
- zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
- /* return directly */
+ zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
}
@@ -86,18 +86,10 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
unit = scpnt->device->hostdata;
- BUG_ON(!adapter || (adapter != unit->port->adapter));
- BUG_ON(!scpnt->scsi_done);
-
- if (unlikely(!unit)) {
- zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
- return 0;
- }
-
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
- zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
+ zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
@@ -189,9 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
- spin_lock(&adapter->req_list_lock);
- old_req = zfcp_reqlist_find(adapter, old_reqid);
- spin_unlock(&adapter->req_list_lock);
+ old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -521,7 +511,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
}
@@ -563,23 +553,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -608,7 +598,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
}
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
@@ -626,7 +616,7 @@ void zfcp_scsi_scan(struct work_struct *work)
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
struct fc_function_template zfcp_transport_functions = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f539e006683..a43035d4bd7 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -19,8 +19,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
@@ -87,8 +86,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
@@ -99,12 +97,11 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
unsigned long val; \
int retval = 0; \
\
- if (!(_feat && get_device(&_feat->sysfs_device))) \
+ if (!(_feat && get_device(&_feat->dev))) \
return -EBUSY; \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
@@ -118,7 +115,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
- put_device(&_feat->sysfs_device); \
+ put_device(&_feat->dev); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@@ -224,10 +221,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
- zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
@@ -258,13 +255,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
- if (!(port && get_device(&port->sysfs_device)))
+ if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -280,7 +276,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
zfcp_erp_wait(unit->port->adapter);
flush_work(&unit->scsi_work);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -289,13 +285,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
- if (!(port && get_device(&port->sysfs_device)))
+ if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -314,12 +309,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
- zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
+ zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);