Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 4
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 2
-rw-r--r--  drivers/s390/block/dasd_diag.c | 38
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 32
-rw-r--r--  drivers/s390/block/dasd_fba.c | 28
-rw-r--r--  drivers/s390/block/dasd_int.h | 5
-rw-r--r--  drivers/s390/block/dcssblk.c | 6
-rw-r--r--  drivers/s390/block/xpram.c | 8
-rw-r--r--  drivers/s390/char/Kconfig | 12
-rw-r--r--  drivers/s390/char/con3215.c | 3
-rw-r--r--  drivers/s390/char/con3270.c | 3
-rw-r--r--  drivers/s390/char/monwriter.c | 6
-rw-r--r--  drivers/s390/char/raw3270.c | 7
-rw-r--r--  drivers/s390/char/sclp.c | 5
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 62
-rw-r--r--  drivers/s390/char/tape.h | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 35
-rw-r--r--  drivers/s390/char/tape_3590.c | 63
-rw-r--r--  drivers/s390/char/tape_block.c | 4
-rw-r--r--  drivers/s390/char/tty3270.c | 9
-rw-r--r--  drivers/s390/char/tty3270.h | 16
-rw-r--r--  drivers/s390/char/vmur.c | 428
-rw-r--r--  drivers/s390/char/vmur.h | 6
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 7
-rw-r--r--  drivers/s390/cio/blacklist.c | 19
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 73
-rw-r--r--  drivers/s390/cio/chp.c | 49
-rw-r--r--  drivers/s390/cio/chsc.c | 26
-rw-r--r--  drivers/s390/cio/chsc.h | 2
-rw-r--r--  drivers/s390/cio/cio.c | 18
-rw-r--r--  drivers/s390/cio/cio_debug.h | 2
-rw-r--r--  drivers/s390/cio/cmf.c | 252
-rw-r--r--  drivers/s390/cio/css.c | 131
-rw-r--r--  drivers/s390/cio/css.h | 4
-rw-r--r--  drivers/s390/cio/device.c | 160
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 167
-rw-r--r--  drivers/s390/cio/device_id.c | 48
-rw-r--r--  drivers/s390/cio/device_ops.c | 498
-rw-r--r--  drivers/s390/cio/qdio.c | 136
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 19
-rw-r--r--  drivers/s390/crypto/zcrypt_mono.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 9
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h | 45
-rw-r--r--  drivers/s390/net/claw.c | 10
-rw-r--r--  drivers/s390/net/ctcmain.c | 7
-rw-r--r--  drivers/s390/net/lcs.c | 12
-rw-r--r--  drivers/s390/net/lcs.h | 1
-rw-r--r--  drivers/s390/net/netiucv.c | 5
-rw-r--r--  drivers/s390/net/qeth.h | 7
-rw-r--r--  drivers/s390/net/qeth_eddp.c | 16
-rw-r--r--  drivers/s390/net/qeth_main.c | 227
-rw-r--r--  drivers/s390/net/qeth_mpc.h | 1
-rw-r--r--  drivers/s390/net/qeth_sys.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 21
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 152
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 4
63 files changed, 1534 insertions(+), 1440 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bfeca57098f..e6bfce690ca 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1187,7 +1187,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
static void
__dasd_process_blk_queue(struct dasd_device * device)
{
- request_queue_t *queue;
+ struct request_queue *queue;
struct request *req;
struct dasd_ccw_req *cqr;
int nr_queued;
@@ -1740,7 +1740,7 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
* Dasd request queue function. Called from ll_rw_blk.c
*/
static void
-do_dasd_request(request_queue_t * queue)
+do_dasd_request(struct request_queue * queue)
{
struct dasd_device *device;
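Illustrative sketch (not part of the patch): a request function declared with the plain struct request_queue type used above, registered via blk_init_queue(); the function name and lock are hypothetical.
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_queue_lock);

static void example_request_fn(struct request_queue *queue)
{
	/* fetch and dispatch requests from the queue here */
}

static struct request_queue *example_alloc_queue(void)
{
	return blk_init_queue(example_request_fn, &example_queue_lock);
}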
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 6a89cefe99b..0c67258fb9e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -291,7 +291,7 @@ dasd_parse_keyword( char *parsestring ) {
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
- NULL, NULL );
+ NULL);
if (!dasd_page_cache)
MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
"fixed buffer mode disabled.");
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index eccac1c3b71..571320ab9e1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,6 +24,7 @@
#include <asm/s390_ext.h>
#include <asm/todclk.h>
#include <asm/vtoc.h>
+#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
@@ -471,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
- int i;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
@@ -492,13 +492,11 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
- }
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -515,18 +513,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
- memset(dbio, 0, sizeof (struct dasd_diag_bio));
- dbio->type = rw_cmd;
- dbio->block_number = recid + 1;
- dbio->buffer = dst;
- dbio++;
- dst += blksize;
- recid++;
- }
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += blksize) {
+ memset(dbio, 0, sizeof (struct dasd_diag_bio));
+ dbio->type = rw_cmd;
+ dbio->block_number = recid + 1;
+ dbio->buffer = dst;
+ dbio++;
+ dst += blksize;
+ recid++;
}
}
cqr->retries = DIAG_MAX_RETRIES;
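Illustrative sketch (not part of the patch): the rq_for_each_segment() iteration that replaces the nested rq_for_each_bio()/bio_for_each_segment() loops above; the helper name is hypothetical.
#include <linux/blkdev.h>

static unsigned int example_count_request_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, req, iter)
		bytes += bv->bv_len;	/* each segment is one bio_vec */
	return bytes;
}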
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 418b4e63a4f..44adf8496bd 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -555,7 +555,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
if (conf_data == NULL) {
MESSAGE(KERN_WARNING, "%s", "No configuration "
"data retrieved");
- continue; /* no errror */
+ continue; /* no error */
}
if (conf_len != sizeof (struct dasd_eckd_confdata)) {
MESSAGE(KERN_WARNING,
@@ -564,7 +564,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
conf_len,
sizeof (struct dasd_eckd_confdata));
kfree(conf_data);
- continue; /* no errror */
+ continue; /* no error */
}
/* save first valid configuration data */
if (!conf_data_saved){
@@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int blksize, blk_per_trk, off;
@@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned char cmd, rcmd;
- int i;
private = (struct dasd_eckd_private *) device->private;
if (rq_data_dir(req) == READ)
@@ -1206,18 +1205,15 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Eckd can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Eckd can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len >> (device->s2b_shift + 9);
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len >> (device->s2b_shift + 9);
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, device, blksize);
}
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
@@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index da16ead8aff..1d95822e0b8 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
- int i;
private = (struct dasd_fba_private *) device->private;
if (rq_data_dir(req) == READ) {
@@ -257,18 +256,15 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len / blksize;
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len / blksize;
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, off;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
@@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 241294cba41..d427daeef51 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -53,6 +53,7 @@
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
+#include <linux/log2.h>
#include <asm/ccwdev.h>
#include <linux/workqueue.h>
#include <asm/debug.h>
@@ -293,7 +294,7 @@ struct dasd_uid {
struct dasd_device {
/* Block device stuff. */
struct gendisk *gdp;
- request_queue_t *request_queue;
+ struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct block_device *bdev;
unsigned int devindex;
@@ -456,7 +457,7 @@ dasd_free_chunk(struct list_head *chunk_list, void *mem)
static inline int
dasd_check_blocksize(int bsize)
{
- if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)) != 0)
+ if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
return -EMEDIUMTYPE;
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 35765f6a86e..859f870552e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -621,7 +621,7 @@ out:
}
static int
-dcssblk_make_request(request_queue_t *q, struct bio *bio)
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec *bvec;
@@ -674,10 +674,10 @@ dcssblk_make_request(request_queue_t *q, struct bio *bio)
}
bytes_done += bvec->bv_len;
}
- bio_endio(bio, bytes_done, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a04d9120cef..f231bc21b1c 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -191,7 +191,7 @@ static unsigned long __init xpram_highest_page_index(void)
/*
* Block device make request function.
*/
-static int xpram_make_request(request_queue_t *q, struct bio *bio)
+static int xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec *bvec;
@@ -230,12 +230,10 @@ static int xpram_make_request(request_queue_t *q, struct bio *bio)
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
- bytes = bio->bi_size;
- bio->bi_size = 0;
- bio->bi_end_io(bio, bytes, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
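Illustrative sketch (not part of the patch): a make_request function completing bios with the two-argument bio_endio() and single-argument bio_io_error() calls used above; device lookup and the actual data transfer are left out.
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static int example_make_request(struct request_queue *q, struct bio *bio)
{
	sector_t last = bio->bi_sector + (bio->bi_size >> 9);

	if (last > get_capacity(bio->bi_bdev->bd_disk)) {
		bio_io_error(bio);	/* out of range: fail the whole bio */
		return 0;
	}
	/* ... transfer the data described by each bio_vec here ... */
	bio_endio(bio, 0);		/* complete without error */
	return 0;
}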
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 3f36cb3910e..643033890e3 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -44,15 +44,9 @@ config CCW_CONSOLE
depends on TN3215_CONSOLE || TN3270_CONSOLE
default y
-config SCLP
- bool "Support for SCLP"
- depends on S390
- help
- Include support for the SCLP interface to the service element.
-
config SCLP_TTY
bool "Support for SCLP line mode terminal"
- depends on SCLP
+ depends on S390
help
Include support for IBM SCLP line-mode terminals.
@@ -65,7 +59,7 @@ config SCLP_CONSOLE
config SCLP_VT220_TTY
bool "Support for SCLP VT220-compatible terminal"
- depends on SCLP
+ depends on S390
help
Include support for an IBM SCLP VT220-compatible terminal.
@@ -78,7 +72,7 @@ config SCLP_VT220_CONSOLE
config SCLP_CPI
tristate "Control-Program Identification"
- depends on SCLP
+ depends on S390
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 6000bdee408..0e1f35c9ed9 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,9 @@ raw3215_probe (struct ccw_device *cdev)
struct raw3215_info *raw;
int line;
+ /* Console is special. */
+ if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
+ return 0;
raw = kmalloc(sizeof(struct raw3215_info) +
RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
if (raw == NULL)
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd3479119eb..0b040557db0 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -22,6 +22,7 @@
#include <asm/ebcdic.h>
#include "raw3270.h"
+#include "tty3270.h"
#include "ctrlchar.h"
#define CON3270_OUTPUT_BUFFER_SIZE 1024
@@ -507,8 +508,6 @@ con3270_write(struct console *co, const char *str, unsigned int count)
spin_unlock_irqrestore(&cp->view.lock,flags);
}
-extern struct tty_driver *tty3270_driver;
-
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 268598ef3ef..20442fbf934 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -17,6 +17,7 @@
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -41,6 +42,7 @@ struct mon_private {
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
+ struct mutex thread_mutex;
};
/*
@@ -179,6 +181,7 @@ static int monwrite_open(struct inode *inode, struct file *filp)
return -ENOMEM;
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
+ mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
return nonseekable_open(inode, filp);
}
@@ -209,6 +212,7 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
void *to;
int rc;
+ mutex_lock(&monpriv->thread_mutex);
for (written = 0; written < count; ) {
if (monpriv->hdr_to_read) {
len = min(count - written, monpriv->hdr_to_read);
@@ -247,11 +251,13 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
}
monpriv->hdr_to_read = sizeof(monpriv->hdr);
}
+ mutex_unlock(&monpriv->thread_mutex);
return written;
out_error:
monpriv->data_to_read = 0;
monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+ mutex_unlock(&monpriv->thread_mutex);
return rc;
}
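Illustrative sketch (not part of the patch): the per-open mutex pattern added above, where the mutex lives in the private data and serializes concurrent write() callers; struct and field names are hypothetical.
#include <linux/mutex.h>
#include <linux/types.h>

struct example_private {
	struct mutex thread_mutex;	/* serializes writers */
	/* ... parser state shared between write() calls ... */
};

static void example_private_init(struct example_private *priv)
{
	mutex_init(&priv->thread_mutex);
}

static ssize_t example_write(struct example_private *priv,
			     const char __user *data, size_t count)
{
	ssize_t rc;

	mutex_lock(&priv->thread_mutex);
	rc = count;	/* a real handler would parse/copy the buffer here */
	mutex_unlock(&priv->thread_mutex);
	return rc;
}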
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 743944ad61e..2edd5fb6d3d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -21,6 +21,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
+#include <asm/diag.h>
#include "raw3270.h"
@@ -147,8 +148,7 @@ raw3270_request_alloc(size_t size)
* Allocate a new 3270 ccw request from bootmem. Only works very
* early in the boot process. Only con3270.c should be using this.
*/
-struct raw3270_request *
-raw3270_request_alloc_bootmem(size_t size)
+struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
{
struct raw3270_request *rq;
@@ -848,8 +848,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
/*
* Setup 3270 device configured as console.
*/
-struct raw3270 *
-raw3270_setup_console(struct ccw_device *cdev)
+struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fa62e694405..25629b92dec 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -93,6 +93,7 @@ static volatile enum sclp_mask_state_t {
#define SCLP_RETRY_INTERVAL 30
static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
@@ -115,7 +116,6 @@ sclp_service_call(sclp_cmdw_t command, void *sccb)
return 0;
}
-static inline void __sclp_make_read_req(void);
static void
__sclp_queue_read_req(void)
@@ -318,8 +318,7 @@ sclp_read_cb(struct sclp_req *req, void *data)
}
/* Prepare read event data request. Called while sclp_lock is locked. */
-static inline void
-__sclp_make_read_req(void)
+static void __sclp_make_read_req(void)
{
struct sccb_header *sccb;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 726334757bb..40cd21bc5cc 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -621,11 +621,24 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
/*
* Initialize all relevant components and register driver with system.
*/
-static int
-__sclp_vt220_init(int early)
+static void __init __sclp_vt220_cleanup(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_vt220_empty) {
+ list_del(page);
+ if (slab_is_available())
+ free_page((unsigned long) page);
+ else
+ free_bootmem((unsigned long) page, PAGE_SIZE);
+ }
+}
+
+static int __init __sclp_vt220_init(void)
{
void *page;
int i;
+ int num_pages;
if (sclp_vt220_initialized)
return 0;
@@ -642,13 +655,16 @@ __sclp_vt220_init(int early)
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
- for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
- if (early)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
+ num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
+ for (i = 0; i < num_pages; i++) {
+ if (slab_is_available())
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
+ else
+ page = alloc_bootmem_low_pages(PAGE_SIZE);
+ if (!page) {
+ __sclp_vt220_cleanup();
return -ENOMEM;
+ }
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
}
return 0;
@@ -662,14 +678,13 @@ static const struct tty_operations sclp_vt220_ops = {
.flush_chars = sclp_vt220_flush_chars,
.write_room = sclp_vt220_write_room,
.chars_in_buffer = sclp_vt220_chars_in_buffer,
- .flush_buffer = sclp_vt220_flush_buffer
+ .flush_buffer = sclp_vt220_flush_buffer,
};
/*
* Register driver with SCLP and Linux and initialize internal tty structures.
*/
-static int __init
-sclp_vt220_tty_init(void)
+static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
@@ -679,18 +694,15 @@ sclp_vt220_tty_init(void)
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
- rc = __sclp_vt220_init(0);
- if (rc) {
- put_tty_driver(driver);
- return rc;
- }
+ rc = __sclp_vt220_init();
+ if (rc)
+ goto out_driver;
rc = sclp_register(&sclp_vt220_register);
if (rc) {
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register tty - "
"sclp_register returned %d\n", rc);
- put_tty_driver(driver);
- return rc;
+ goto out_init;
}
driver->owner = THIS_MODULE;
@@ -709,14 +721,20 @@ sclp_vt220_tty_init(void)
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register tty - "
"tty_register_driver returned %d\n", rc);
- put_tty_driver(driver);
- return rc;
+ goto out_sclp;
}
sclp_vt220_driver = driver;
return 0;
-}
-module_init(sclp_vt220_tty_init);
+out_sclp:
+ sclp_unregister(&sclp_vt220_register);
+out_init:
+ __sclp_vt220_cleanup();
+out_driver:
+ put_tty_driver(driver);
+ return rc;
+}
+__initcall(sclp_vt220_tty_init);
#ifdef CONFIG_SCLP_VT220_CONSOLE
@@ -762,7 +780,7 @@ sclp_vt220_con_init(void)
if (!CONSOLE_IS_SCLP)
return 0;
- rc = __sclp_vt220_init(1);
+ rc = __sclp_vt220_init();
if (rc)
return rc;
/* Attach linux console */
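Illustrative sketch (not part of the patch): the allocate-early-or-late pattern used above, falling back to bootmem while the slab allocator is not yet available; the helper name is hypothetical.
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>

static void *__init example_get_buffer_page(void)
{
	if (slab_is_available())
		return (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	return alloc_bootmem_low_pages(PAGE_SIZE);
}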
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 3b52f5c1dbe..dddf8d62c15 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -188,7 +188,7 @@ struct tape_blk_data
{
struct tape_device * device;
/* Block device request queue. */
- request_queue_t * request_queue;
+ struct request_queue * request_queue;
spinlock_t request_queue_lock;
/* Task to move entries from block request to CCS request queue. */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index e765875e8db..5b47e9cce75 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -131,10 +131,9 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
struct tape_34xx_work *p;
- if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+ if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
INIT_WORK(&p->work, tape_34xx_work_handler);
p->device = tape_get_device_reference(device);
@@ -1135,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, i;
+ int count = 0;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
struct tape_34xx_block_id * start_block;
DBF_EVENT(6, "xBREDid:");
/* Count the number of blocks for the request. */
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
/* Allocate the ccw request. */
request = tape_alloc_request(3+count+1, 8);
@@ -1176,18 +1172,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = kmap(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void*) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
+ rq_for_each_segment(bv, req, iter) {
+ dst = kmap(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void*) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
}
}
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 7e2b2ab4926..da25f8e2415 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, start_block, i;
+ int count = 0, start_block;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
request = tape_alloc_request(2 + count + 1, 4);
if (IS_ERR(request))
return request;
@@ -653,21 +651,18 @@ tape_3590_bread(struct tape_device *device, struct request *req)
*/
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void *) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
- if (off > bv->bv_len)
- BUG();
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void *) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
}
+ if (off > bv->bv_len)
+ BUG();
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
@@ -713,16 +708,22 @@ static void tape_3590_med_state_set(struct tape_device *device,
c_info = &TAPE_3590_CRYPT_INFO(device);
- if (sense->masst == MSENSE_UNASSOCIATED) {
+ DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
+ switch (sense->macst) {
+ case 0x04:
+ case 0x05:
+ case 0x06:
tape_med_state_set(device, MS_UNLOADED);
TAPE_3590_CRYPT_INFO(device).medium_status = 0;
return;
- }
- if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
- PRINT_ERR("Unknown medium state: %x\n", sense->masst);
+ case 0x08:
+ case 0x09:
+ tape_med_state_set(device, MS_LOADED);
+ break;
+ default:
+ tape_med_state_set(device, MS_UNKNOWN);
return;
}
- tape_med_state_set(device, MS_LOADED);
c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
if (sense->flags & MSENSE_CRYPT_MASK) {
PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
@@ -840,15 +841,17 @@ tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
/* Probably result of halt ssch */
return TAPE_IO_PENDING;
else if (irb->scsw.dstat == 0x85)
- /* Device Ready -> check medium state */
- tape_3590_schedule_work(device, TO_MSEN);
- else if (irb->scsw.dstat & DEV_STAT_ATTENTION)
+ /* Device Ready */
+ DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
+ else if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
tape_3590_schedule_work(device, TO_READ_ATTMSG);
- else {
+ } else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
tape_dump_sense(device, NULL, irb);
}
+ /* check medium state */
+ tape_3590_schedule_work(device, TO_MSEN);
return TAPE_IO_SUCCESS;
}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecaed592..eeb92e2ed0c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -147,7 +147,7 @@ static void
tapeblock_requeue(struct work_struct *work) {
struct tape_blk_data * blkdat;
struct tape_device * device;
- request_queue_t * queue;
+ struct request_queue * queue;
int nr_queued;
struct request * req;
struct list_head * l;
@@ -194,7 +194,7 @@ tapeblock_requeue(struct work_struct *work) {
* Tape request queue function. Called from ll_rw_blk.c
*/
static void
-tapeblock_request_fn(request_queue_t *queue)
+tapeblock_request_fn(struct request_queue *queue)
{
struct tape_device *device;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index bc33068b9ce..70b1980a08b 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -25,8 +25,8 @@
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
-
#include "raw3270.h"
+#include "tty3270.h"
#include "keyboard.h"
#define TTY3270_CHAR_BUF_SIZE 256
@@ -1338,8 +1338,11 @@ tty3270_getpar(struct tty3270 *tp, int ix)
static void
tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
- tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx));
- cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy));
+ int max_cx = max(0, cx);
+ int max_cy = max(0, cy);
+
+ tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+ cy = min_t(int, tp->view.rows - 3, max_cy);
if (cy != tp->cy) {
tty3270_convert_line(tp, tp->cy);
tp->cy = cy;
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
new file mode 100644
index 00000000000..799da57f039
--- /dev/null
+++ b/drivers/s390/char/tty3270.h
@@ -0,0 +1,16 @@
+/*
+ * drivers/s390/char/tty3270.h
+ *
+ * Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __DRIVERS_S390_CHAR_TTY3270_H
+#define __DRIVERS_S390_CHAR_TTY3270_H
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *tty3270_driver;
+
+#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index e90b0f84619..d70a6e65bf1 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -14,6 +14,7 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
+#include <asm/diag.h>
#include "vmur.h"
@@ -68,8 +69,26 @@ static struct ccw_driver ur_driver = {
.set_offline = ur_set_offline,
};
+static DEFINE_MUTEX(vmur_mutex);
+
/*
* Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device using the cdev->dev.driver_data pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ * (cdev->dev.driver_data)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ * (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc gets a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
*/
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
@@ -78,51 +97,72 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
if (!urd)
return NULL;
- urd->cdev = cdev;
urd->reclen = cdev->id.driver_info;
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
mutex_init(&urd->open_mutex);
+ atomic_set(&urd->ref_count, 1);
+ urd->cdev = cdev;
+ get_device(&cdev->dev);
return urd;
}
static void urdev_free(struct urdev *urd)
{
+ TRACE("urdev_free: %p\n", urd);
+ if (urd->cdev)
+ put_device(&urd->cdev->dev);
kfree(urd);
}
-/*
- * This is how the character device driver gets a reference to a
- * ur device. When this call returns successfully, a reference has
- * been taken (by get_device) on the underlying kobject. The recipient
- * of this urdev pointer must eventually drop it with urdev_put(urd)
- * which does the corresponding put_device().
- */
+static void urdev_get(struct urdev *urd)
+{
+ atomic_inc(&urd->ref_count);
+}
+
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+ unsigned long flags;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ urd = cdev->dev.driver_data;
+ if (urd)
+ urdev_get(urd);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return urd;
+}
+
static struct urdev *urdev_get_from_devno(u16 devno)
{
char bus_id[16];
struct ccw_device *cdev;
+ struct urdev *urd;
sprintf(bus_id, "0.0.%04x", devno);
cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
if (!cdev)
return NULL;
-
- return cdev->dev.driver_data;
+ urd = urdev_get_from_cdev(cdev);
+ put_device(&cdev->dev);
+ return urd;
}
static void urdev_put(struct urdev *urd)
{
- put_device(&urd->cdev->dev);
+ if (atomic_dec_and_test(&urd->ref_count))
+ urdev_free(urd);
}
/*
* Low-level functions to do I/O to a ur device.
* alloc_chan_prog
+ * free_chan_prog
* do_ur_io
* ur_int_handler
*
* alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
*
* do_ur_io issues the channel program to the device and blocks waiting
* on a completion event it publishes at urd->io_done. The function
@@ -137,6 +177,16 @@ static void urdev_put(struct urdev *urd)
* address pointer that alloc_chan_prog returned.
*/
+static void free_chan_prog(struct ccw1 *cpa)
+{
+ struct ccw1 *ptr = cpa;
+
+ while (ptr->cda) {
+ kfree((void *)(addr_t) ptr->cda);
+ ptr++;
+ }
+ kfree(cpa);
+}
/*
* alloc_chan_prog
@@ -144,44 +194,45 @@ static void urdev_put(struct urdev *urd)
* with a final NOP CCW command-chained on (which ensures that CE and DE
* are presented together in a single interrupt instead of as separate
* interrupts unless an incorrect length indication kicks in first). The
- * data length in each CCW is reclen. The caller must ensure that count
- * is an integral multiple of reclen.
- * The channel program pointer returned by this function must be freed
- * with kfree. The caller is responsible for checking that
- * count/reclen is not ridiculously large.
+ * data length in each CCW is reclen.
*/
-static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+ int reclen)
{
- size_t num_ccws;
struct ccw1 *cpa;
+ void *kbuf;
int i;
- TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+ TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
/*
* We chain a NOP onto the writes to force CE+DE together.
* That means we allocate room for CCWs to cover count/reclen
* records plus a NOP.
*/
- num_ccws = count / reclen + 1;
- cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+ GFP_KERNEL | GFP_DMA);
if (!cpa)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- for (i = 0; count; i++) {
+ for (i = 0; i < rec_count; i++) {
cpa[i].cmd_code = WRITE_CCW_CMD;
cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
cpa[i].count = reclen;
- cpa[i].cda = __pa(buf);
- buf += reclen;
- count -= reclen;
+ kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+ if (!kbuf) {
+ free_chan_prog(cpa);
+ return ERR_PTR(-ENOMEM);
+ }
+ cpa[i].cda = (u32)(addr_t) kbuf;
+ if (copy_from_user(kbuf, ubuf, reclen)) {
+ free_chan_prog(cpa);
+ return ERR_PTR(-EFAULT);
+ }
+ ubuf += reclen;
}
/* The following NOP CCW forces CE+DE to be presented together */
cpa[i].cmd_code = CCW_CMD_NOOP;
- cpa[i].flags = 0;
- cpa[i].count = 0;
- cpa[i].cda = 0;
-
return cpa;
}
@@ -189,7 +240,7 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
int rc;
struct ccw_device *cdev = urd->cdev;
- DECLARE_COMPLETION(event);
+ DECLARE_COMPLETION_ONSTACK(event);
TRACE("do_ur_io: cpa=%p\n", cpa);
@@ -232,6 +283,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
urd = cdev->dev.driver_data;
+ BUG_ON(!urd);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
@@ -249,9 +301,15 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
static ssize_t ur_attr_reclen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct urdev *urd = dev->driver_data;
+ struct urdev *urd;
+ int rc;
- return sprintf(buf, "%zu\n", urd->reclen);
+ urd = urdev_get_from_cdev(to_ccwdev(dev));
+ if (!urd)
+ return -ENODEV;
+ rc = sprintf(buf, "%zu\n", urd->reclen);
+ urdev_put(urd);
+ return rc;
}
static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
@@ -325,24 +383,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
size_t count, size_t reclen, loff_t *ppos)
{
struct ccw1 *cpa;
- char *buf;
int rc;
- /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
- buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, udata, count)) {
- rc = -EFAULT;
- goto fail_kfree_buf;
- }
-
- cpa = alloc_chan_prog(buf, count, reclen);
- if (!cpa) {
- rc = -ENOMEM;
- goto fail_kfree_buf;
- }
+ cpa = alloc_chan_prog(udata, count / reclen, reclen);
+ if (IS_ERR(cpa))
+ return PTR_ERR(cpa);
rc = do_ur_io(urd, cpa);
if (rc)
@@ -354,10 +399,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
}
*ppos += count;
rc = count;
+
fail_kfree_cpa:
- kfree(cpa);
-fail_kfree_buf:
- kfree(buf);
+ free_chan_prog(cpa);
return rc;
}
@@ -380,31 +424,6 @@ static ssize_t ur_write(struct file *file, const char __user *udata,
return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
-static int do_diag_14(unsigned long rx, unsigned long ry1,
- unsigned long subcode)
-{
- register unsigned long _ry1 asm("2") = ry1;
- register unsigned long _ry2 asm("3") = subcode;
- int rc = 0;
-
- asm volatile(
-#ifdef CONFIG_64BIT
- " sam31\n"
- " diag %2,2,0x14\n"
- " sam64\n"
-#else
- " diag %2,2,0x14\n"
-#endif
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+d" (_ry2)
- : "d" (rx), "d" (_ry1)
- : "cc");
-
- TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
- return rc;
-}
-
/*
* diagnose code 0x14 subcode 0x0028 - position spool file to designated
* record
@@ -416,7 +435,7 @@ static int diag_position_to_record(int devno, int record)
{
int cc;
- cc = do_diag_14(record, devno, 0x28);
+ cc = diag14(record, devno, 0x28);
switch (cc) {
case 0:
return 0;
@@ -441,7 +460,7 @@ static int diag_read_file(int devno, char *buf)
{
int cc;
- cc = do_diag_14((unsigned long) buf, devno, 0x00);
+ cc = diag14((unsigned long) buf, devno, 0x00);
switch (cc) {
case 0:
return 0;
@@ -473,7 +492,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
return rc;
len = min((size_t) PAGE_SIZE, count);
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!buf)
return -ENOMEM;
@@ -486,7 +505,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
}
if (rc)
goto fail;
- if (reclen)
+ if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
len = min(count - copied, PAGE_SIZE - res);
if (copy_to_user(ubuf + copied, buf + res, len)) {
@@ -500,7 +519,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
*offs += copied;
rc = copied;
fail:
- kfree(buf);
+ free_page((unsigned long) buf);
return rc;
}
@@ -534,7 +553,7 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
int cc;
- cc = do_diag_14((unsigned long) buf, spid, 0xfff);
+ cc = diag14((unsigned long) buf, spid, 0xfff);
switch (cc) {
case 0:
return 0;
@@ -543,56 +562,97 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
}
}
-static int verify_device(struct urdev *urd)
+static int verify_uri_device(struct urdev *urd)
{
- struct file_control_block fcb;
+ struct file_control_block *fcb;
char *buf;
int rc;
+ fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+ if (!fcb)
+ return -ENOMEM;
+
+ /* check for empty reader device (beginning of chain) */
+ rc = diag_read_next_file_info(fcb, 0);
+ if (rc)
+ goto fail_free_fcb;
+
+ /* if file is in hold status, we do not read it */
+ if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+ rc = -EPERM;
+ goto fail_free_fcb;
+ }
+
+ /* open file on virtual reader */
+ buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto fail_free_fcb;
+ }
+ rc = diag_read_file(urd->dev_id.devno, buf);
+ if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+ goto fail_free_buf;
+
+ /* check if the file on top of the queue is open now */
+ rc = diag_read_next_file_info(fcb, 0);
+ if (rc)
+ goto fail_free_buf;
+ if (!(fcb->file_stat & FLG_IN_USE)) {
+ rc = -EMFILE;
+ goto fail_free_buf;
+ }
+ rc = 0;
+
+fail_free_buf:
+ free_page((unsigned long) buf);
+fail_free_fcb:
+ kfree(fcb);
+ return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
switch (urd->class) {
case DEV_CLASS_UR_O:
return 0; /* no check needed here */
case DEV_CLASS_UR_I:
- /* check for empty reader device (beginning of chain) */
- rc = diag_read_next_file_info(&fcb, 0);
- if (rc)
- return rc;
-
- /* open file on virtual reader */
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rc = diag_read_file(urd->dev_id.devno, buf);
- kfree(buf);
-
- if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
- return rc;
- return 0;
+ return verify_uri_device(urd);
default:
return -ENOTSUPP;
}
}
-static int get_file_reclen(struct urdev *urd)
+static int get_uri_file_reclen(struct urdev *urd)
{
- struct file_control_block fcb;
+ struct file_control_block *fcb;
int rc;
+ fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+ if (!fcb)
+ return -ENOMEM;
+ rc = diag_read_next_file_info(fcb, 0);
+ if (rc)
+ goto fail_free;
+ if (fcb->file_stat & FLG_CP_DUMP)
+ rc = 0;
+ else
+ rc = fcb->rec_len;
+
+fail_free:
+ kfree(fcb);
+ return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
switch (urd->class) {
case DEV_CLASS_UR_O:
return 0;
case DEV_CLASS_UR_I:
- rc = diag_read_next_file_info(&fcb, 0);
- if (rc)
- return rc;
- break;
+ return get_uri_file_reclen(urd);
default:
return -ENOTSUPP;
}
- if (fcb.file_stat & FLG_CP_DUMP)
- return 0;
-
- return fcb.rec_len;
}
static int ur_open(struct inode *inode, struct file *file)
@@ -710,64 +770,63 @@ static struct file_operations ur_fops = {
/*
* ccw_device infrastructure:
- * ur_probe gets its own ref to the device (i.e. get_device),
- * creates the struct urdev, the device attributes, sets up
- * the interrupt handler and validates the virtual unit record device.
- * ur_remove removes the device attributes, frees the struct urdev
- * and drops (put_device) the ref to the device we got in ur_probe.
+ * ur_probe creates the struct urdev (with refcount = 1), the device
+ * attributes, sets up the interrupt handler and validates the virtual
+ * unit record device.
+ * ur_remove removes the device attributes and drops the reference to
+ * struct urdev.
+ *
+ * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ * by the vmur_mutex lock.
+ *
+ * urd->char_device is used as indication that the online function has
+ * been completed successfully.
*/
static int ur_probe(struct ccw_device *cdev)
{
struct urdev *urd;
int rc;
- TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);
-
- if (!get_device(&cdev->dev))
- return -ENODEV;
+ TRACE("ur_probe: cdev=%p\n", cdev);
+ mutex_lock(&vmur_mutex);
urd = urdev_alloc(cdev);
if (!urd) {
rc = -ENOMEM;
- goto fail;
+ goto fail_unlock;
}
+
rc = ur_create_attributes(&cdev->dev);
if (rc) {
rc = -ENOMEM;
- goto fail;
+ goto fail_urdev_put;
}
- cdev->dev.driver_data = urd;
cdev->handler = ur_int_handler;
/* validate virtual unit record device */
urd->class = get_urd_class(urd);
if (urd->class < 0) {
rc = urd->class;
- goto fail;
+ goto fail_remove_attr;
}
if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
rc = -ENOTSUPP;
- goto fail;
+ goto fail_remove_attr;
}
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->dev.driver_data = urd;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ mutex_unlock(&vmur_mutex);
return 0;
-fail:
- urdev_free(urd);
- put_device(&cdev->dev);
- return rc;
-}
-
-static void ur_remove(struct ccw_device *cdev)
-{
- struct urdev *urd = cdev->dev.driver_data;
-
- TRACE("ur_remove\n");
- if (cdev->online)
- ur_set_offline(cdev);
+fail_remove_attr:
ur_remove_attributes(&cdev->dev);
- urdev_free(urd);
- put_device(&cdev->dev);
+fail_urdev_put:
+ urdev_put(urd);
+fail_unlock:
+ mutex_unlock(&vmur_mutex);
+ return rc;
}
static int ur_set_online(struct ccw_device *cdev)
@@ -776,20 +835,29 @@ static int ur_set_online(struct ccw_device *cdev)
int minor, major, rc;
char node_id[16];
- TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
- *(int *) cdev->private);
+ TRACE("ur_set_online: cdev=%p\n", cdev);
- if (!try_module_get(ur_driver.owner))
- return -EINVAL;
+ mutex_lock(&vmur_mutex);
+ urd = urdev_get_from_cdev(cdev);
+ if (!urd) {
+ /* ur_remove already deleted our urd */
+ rc = -ENODEV;
+ goto fail_unlock;
+ }
+
+ if (urd->char_device) {
+ /* Another ur_set_online was faster */
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
- urd = (struct urdev *) cdev->dev.driver_data;
minor = urd->dev_id.devno;
major = MAJOR(ur_first_dev_maj_min);
urd->char_device = cdev_alloc();
if (!urd->char_device) {
rc = -ENOMEM;
- goto fail_module_put;
+ goto fail_urdev_put;
}
cdev_init(urd->char_device, &ur_fops);
@@ -818,29 +886,79 @@ static int ur_set_online(struct ccw_device *cdev)
TRACE("ur_set_online: device_create rc=%d\n", rc);
goto fail_free_cdev;
}
-
+ urdev_put(urd);
+ mutex_unlock(&vmur_mutex);
return 0;
fail_free_cdev:
cdev_del(urd->char_device);
-fail_module_put:
- module_put(ur_driver.owner);
-
+ urd->char_device = NULL;
+fail_urdev_put:
+ urdev_put(urd);
+fail_unlock:
+ mutex_unlock(&vmur_mutex);
return rc;
}
-static int ur_set_offline(struct ccw_device *cdev)
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
struct urdev *urd;
+ int rc;
- TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
- cdev, cdev->private, *(int *) cdev->private);
- urd = (struct urdev *) cdev->dev.driver_data;
+ TRACE("ur_set_offline: cdev=%p\n", cdev);
+ urd = urdev_get_from_cdev(cdev);
+ if (!urd)
+ /* ur_remove already deleted our urd */
+ return -ENODEV;
+ if (!urd->char_device) {
+ /* Another ur_set_offline was faster */
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
+ if (!force && (atomic_read(&urd->ref_count) > 2)) {
+ /* There is still a user of urd (e.g. ur_open) */
+ TRACE("ur_set_offline: BUSY\n");
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
device_destroy(vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
- module_put(ur_driver.owner);
+ urd->char_device = NULL;
+ rc = 0;
- return 0;
+fail_urdev_put:
+ urdev_put(urd);
+ return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+ int rc;
+
+ mutex_lock(&vmur_mutex);
+ rc = ur_set_offline_force(cdev, 0);
+ mutex_unlock(&vmur_mutex);
+ return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+ unsigned long flags;
+
+ TRACE("ur_remove\n");
+
+ mutex_lock(&vmur_mutex);
+
+ if (cdev->online)
+ ur_set_offline_force(cdev, 1);
+ ur_remove_attributes(&cdev->dev);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ urdev_put(cdev->dev.driver_data);
+ cdev->dev.driver_data = NULL;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ mutex_unlock(&vmur_mutex);
}
/*
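Illustrative sketch (not part of the patch): the reference-counting scheme described in the vmur comment above — the object starts with one reference, urdev_get-style users bump it, and the final put frees it; names are hypothetical.
#include <asm/atomic.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t ref_count;
};

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		atomic_set(&obj->ref_count, 1);	/* creator holds one reference */
	return obj;
}

static void example_get(struct example_obj *obj)
{
	atomic_inc(&obj->ref_count);
}

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->ref_count))
		kfree(obj);	/* last reference gone */
}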
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 16d0a4e38e4..fa959644735 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -50,7 +50,10 @@ struct file_control_block {
char rest[200];
} __attribute__ ((packed));
-#define FLG_CP_DUMP 0x10
+#define FLG_SYSTEM_HOLD 0x04
+#define FLG_CP_DUMP 0x10
+#define FLG_USER_HOLD 0x20
+#define FLG_IN_USE 0x80
/*
* A struct urdev is created for each ur device that is made available
@@ -67,6 +70,7 @@ struct urdev {
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
+ atomic_t ref_count; /* reference counter */
};
/*
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 680b9b58b80..6f40facb1c4 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,8 +66,8 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (err) : "d"(__func), "d"(__timeout),
- "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc");
+ : "+d" (err) : "d"(__func), "d"(__timeout),
+ "d"(__cmdp), "d"(__cmdl) : "1", "cc");
return err;
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3712ede1672..7073daf7798 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -141,15 +141,16 @@ static int memcpy_real(void *dest, unsigned long src, size_t count)
if (count == 0)
return 0;
- flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
+ flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
" lhi %0,0x0\n"
"2:\n"
EX_TABLE(1b,2b)
- : "+d" (rc)
- : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
+ : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
+ "+d" (_len2), "=m" (*((long*)dest))
+ : "m" (*((long*)src))
: "cc", "memory");
__raw_local_irq_ssm(flags);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index ec0404874fa..bd5f16f80bf 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -51,7 +51,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
to = from;
if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
- printk (KERN_WARNING "Invalid blacklist range "
+ printk (KERN_WARNING "cio: Invalid blacklist range "
"0.%x.%04x to 0.%x.%04x, skipping\n",
ssid, from, ssid, to);
return;
@@ -119,7 +119,7 @@ blacklist_busid(char **str, int *id0, int *ssid, int *devno)
return 0;
confused:
strsep(str, ",\n");
- printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav);
+ printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav);
return 1;
}
@@ -166,22 +166,19 @@ blacklist_parse_parameters (char *str, range_action action)
continue;
}
if (*str == '-') {
- printk(KERN_WARNING "invalid cio_ignore "
+ printk(KERN_WARNING "cio: invalid cio_ignore "
"parameter '%s'\n",
strsep(&str, ",\n"));
continue;
}
if ((from_id0 != to_id0) ||
(from_ssid != to_ssid)) {
- printk(KERN_WARNING "invalid cio_ignore range "
- "%x.%x.%04x-%x.%x.%04x\n",
- from_id0, from_ssid, from,
- to_id0, to_ssid, to);
+ printk(KERN_WARNING "cio: invalid cio_ignore "
+ "range %x.%x.%04x-%x.%x.%04x\n",
+ from_id0, from_ssid, from,
+ to_id0, to_ssid, to);
continue;
}
- pr_debug("blacklist_setup: adding range "
- "from %x.%x.%04x to %x.%x.%04x\n",
- from_id0, from_ssid, from, to_id0, to_ssid, to);
blacklist_range (ra, from, to, to_ssid);
}
}
@@ -239,7 +236,7 @@ blacklist_parse_proc_parameters (char *buf)
*/
blacklist_parse_parameters (buf + 4, add);
} else {
- printk (KERN_WARNING "cio_ignore: Parse error; \n"
+ printk (KERN_WARNING "cio: cio_ignore: Parse error; \n"
KERN_WARNING "try using 'free all|<devno-range>,"
"<devno-range>,...'\n"
KERN_WARNING "or 'add <devno-range>,"
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e5ccda63e88..5baa517c3b6 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -44,8 +44,7 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
return 0;
}
static int
-ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
- int buffer_size)
+ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
/* TODO */
return 0;
@@ -152,16 +151,24 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
return 0;
}
-/*
- * try to add a new ccwgroup device for one driver
- * argc and argv[] are a list of bus_id's of devices
- * belonging to the driver.
+/**
+ * ccwgroup_create() - create and register a ccw group device
+ * @root: parent device for the new device
+ * @creator_id: identifier of creating driver
+ * @cdrv: ccw driver of slave devices
+ * @argc: number of slave devices
+ * @argv: bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @root. Slave
+ * devices are obtained from the list of bus ids given in @argv[] and must all
+ * belong to @cdrv.
+ * Returns:
+ * %0 on success and an error code on failure.
+ * Context:
+ * non-atomic
*/
-int
-ccwgroup_create(struct device *root,
- unsigned int creator_id,
- struct ccw_driver *cdrv,
- int argc, char *argv[])
+int ccwgroup_create(struct device *root, unsigned int creator_id,
+ struct ccw_driver *cdrv, int argc, char *argv[])
{
struct ccwgroup_device *gdev;
int i;
@@ -359,7 +366,6 @@ ccwgroup_probe (struct device *dev)
if ((ret = device_create_file(dev, &dev_attr_online)))
return ret;
- pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
if (ret)
device_remove_file(dev, &dev_attr_online);
@@ -376,8 +382,6 @@ ccwgroup_remove (struct device *dev)
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
-
device_remove_file(dev, &dev_attr_online);
if (gdrv && gdrv->remove)
@@ -393,8 +397,13 @@ static struct bus_type ccwgroup_bus_type = {
.remove = ccwgroup_remove,
};
-int
-ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
@@ -409,8 +418,13 @@ __ccwgroup_match_all(struct device *dev, void *data)
return 1;
}
-void
-ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
struct device *dev;
@@ -430,8 +444,16 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-int
-ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ * always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
@@ -455,8 +477,15 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
return NULL;
}
-void
-ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
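Illustrative sketch (not part of the patch): a hypothetical caller of ccwgroup_create() as documented above, grouping two slave devices of a ccw driver into one group device; the creator id and bus ids are made up.
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>

static int example_group_devices(struct device *root, struct ccw_driver *cdrv)
{
	char *bus_ids[] = { "0.0.f5f0", "0.0.f5f1" };

	/* root device, creator id, slave driver, argc, bus ids of slaves */
	return ccwgroup_create(root, 0x12345678, cdrv, 2, bus_ids);
}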
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index b57d93d986c..42c1f4659ad 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -14,7 +14,7 @@
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
@@ -55,7 +55,7 @@ static wait_queue_head_t cfg_wait_queue;
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
- return css[chpid.cssid]->chps[chpid.id];
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
}
/* Set vary state for given chpid. */
@@ -86,7 +86,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
opm = 0;
chp_id_init(&chpid);
- for (i=0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
opm <<= 1;
chpid.id = sch->schib.pmcw.chpid[i];
if (chp_get_status(chpid) != 0)
@@ -118,17 +118,11 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
chpid.id);
- CIO_TRACE_EVENT( 2, dbf_text);
+ CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
- if (status < 0) {
- printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
- chpid.cssid, chpid.id);
- return -EINVAL;
- }
-
if (!on && !status) {
- printk(KERN_ERR "chpid %x.%02x is already offline\n",
+ printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
chpid.cssid, chpid.id);
return -EINVAL;
}
@@ -146,9 +140,11 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
+ struct device *device;
unsigned int size;
- chp = to_channelpath(container_of(kobj, struct device, kobj));
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
if (!chp->cmg_chars)
return 0;
@@ -199,9 +195,11 @@ static ssize_t chp_measurement_read(struct kobject *kobj,
{
struct channel_path *chp;
struct channel_subsystem *css;
+ struct device *device;
unsigned int size;
- chp = to_channelpath(container_of(kobj, struct device, kobj));
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
css = to_css(chp->dev.parent);
size = sizeof(struct cmg_entry);
@@ -359,7 +357,7 @@ static ssize_t chp_shared_show(struct device *dev,
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-static struct attribute * chp_attrs[] = {
+static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
@@ -401,7 +399,7 @@ int chp_new(struct chp_id chpid)
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
- chp->dev.parent = &css[chpid.cssid]->device;
+ chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.release = chp_release;
snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
chpid.id);
@@ -421,21 +419,14 @@ int chp_new(struct chp_id chpid)
if (ret)
goto out_free;
} else {
- static int msg_done;
-
- if (!msg_done) {
- printk(KERN_WARNING "cio: Channel measurements not "
- "available, continuing.\n");
- msg_done = 1;
- }
chp->cmg = -1;
}
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
- printk(KERN_WARNING "%s: could not register %x.%02x\n",
- __func__, chpid.cssid, chpid.id);
+ CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+ chpid.cssid, chpid.id, ret);
goto out_free;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
@@ -443,18 +434,18 @@ int chp_new(struct chp_id chpid)
device_unregister(&chp->dev);
goto out_free;
}
- mutex_lock(&css[chpid.cssid]->mutex);
- if (css[chpid.cssid]->cm_enabled) {
+ mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
+ if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
- mutex_unlock(&css[chpid.cssid]->mutex);
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out_free;
}
}
- css[chpid.cssid]->chps[chpid.id] = chp;
- mutex_unlock(&css[chpid.cssid]->mutex);
+ channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
return ret;
out_free:
kfree(chp);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index ea92ac4d657..597c0c76a2a 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -990,16 +990,20 @@ out:
return ret;
}
-static int __init
-chsc_alloc_sei_area(void)
+int __init chsc_alloc_sei_area(void)
{
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sei_page)
- printk(KERN_WARNING"Can't allocate page for processing of " \
- "chsc machine checks!\n");
+ CIO_MSG_EVENT(0, "Can't allocate page for processing of "
+ "chsc machine checks!\n");
return (sei_page ? 0 : -ENOMEM);
}
+void __init chsc_free_sei_area(void)
+{
+ kfree(sei_page);
+}
+
int __init
chsc_enable_facility(int operation_code)
{
@@ -1051,8 +1055,6 @@ chsc_enable_facility(int operation_code)
return ret;
}
-subsys_initcall(chsc_alloc_sei_area);
-
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
@@ -1073,8 +1075,8 @@ chsc_determine_css_characteristics(void)
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsc_area) {
- printk(KERN_WARNING"cio: Was not able to determine available" \
- "CHSCs due to no memory.\n");
+ CIO_MSG_EVENT(0, "Was not able to determine available"
+ "CHSCs due to no memory.\n");
return -ENOMEM;
}
@@ -1083,15 +1085,15 @@ chsc_determine_css_characteristics(void)
result = chsc(scsc_area);
if (result) {
- printk(KERN_WARNING"cio: Was not able to determine " \
- "available CHSCs, cc=%i.\n", result);
+ CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
+ "cc=%i.\n", result);
result = -EIO;
goto exit;
}
if (scsc_area->response.code != 1) {
- printk(KERN_WARNING"cio: Was not able to determine " \
- "available CHSCs.\n");
+ CIO_MSG_EVENT(0, "Was not able to determine "
+ "available CHSCs.\n");
result = -EIO;
goto exit;
}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 2ad81d11cf7..d1f5db1e69b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -79,6 +79,8 @@ extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
+extern int chsc_alloc_sei_area(void);
+extern void chsc_free_sei_area(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ea1defba569..46905345159 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -47,8 +47,8 @@ cio_setup (char *parm)
else if (!strcmp (parm, "no"))
cio_show_msg = 0;
else
- printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
- parm);
+ printk(KERN_ERR "cio: cio_setup: "
+ "invalid cio_msg parameter '%s'", parm);
return 1;
}
@@ -80,7 +80,6 @@ cio_debug_init (void)
goto out_unregister;
debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
debug_set_level (cio_debug_crw_id, 2);
- pr_debug("debugging initialized\n");
return 0;
out_unregister:
@@ -90,7 +89,7 @@ out_unregister:
debug_unregister (cio_debug_trace_id);
if (cio_debug_crw_id)
debug_unregister (cio_debug_crw_id);
- pr_debug("could not initialize debugging\n");
+ printk(KERN_WARNING"cio: could not initialize debugging\n");
return -1;
}
@@ -568,7 +567,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
*/
if (sch->st != 0) {
CIO_DEBUG(KERN_INFO, 0,
- "Subchannel 0.%x.%04x reports "
+ "cio: Subchannel 0.%x.%04x reports "
"non-I/O subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
@@ -601,7 +600,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->lpm = sch->schib.pmcw.pam & sch->opm;
CIO_DEBUG(KERN_INFO, 0,
- "Detected device %04x on subchannel 0.%x.%04X"
+ "cio: Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
@@ -620,6 +619,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->schib.pmcw.ena = 0;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
+ /* clean up possible residual cmf stuff */
+ sch->schib.pmcw.mme = 0;
+ sch->schib.pmcw.mbfc = 0;
+ sch->schib.pmcw.mbi = 0;
+ sch->schib.mba = 0;
return 0;
out:
if (!cio_is_console(schid))
@@ -766,7 +770,7 @@ cio_get_console_sch_no(void)
/* unlike in 2.4, we cannot autoprobe here, since
* the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */
- printk(KERN_WARNING "No ccw console found!\n");
+ printk(KERN_WARNING "cio: No ccw console found!\n");
return -1;
}
return console_irq;
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index f88844adae1..c9bf8989930 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -23,6 +23,8 @@ extern debug_info_t *cio_debug_crw_id;
static inline void
CIO_HEX_EVENT(int level, void *data, int length)
{
+ if (unlikely(!cio_debug_trace_id))
+ return;
while (length > 0) {
debug_event(cio_debug_trace_id, level, data, length);
length -= cio_debug_trace_id->buf_size;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 28abd697be1..b960f66843e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -45,7 +45,8 @@
#include "ioasm.h"
#include "chsc.h"
-/* parameter to enable cmf during boot, possible uses are:
+/*
+ * parameter to enable cmf during boot, possible uses are:
* "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
* used on any subchannel
* "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
@@ -73,18 +74,20 @@ enum cmb_index {
* enum cmb_format - types of supported measurement block formats
*
* @CMF_BASIC: traditional channel measurement blocks supported
- * by all machines that we run on
+ * by all machines that we run on
* @CMF_EXTENDED: improved format that was introduced with the z990
- * machine
- * @CMF_AUTODETECT: default: use extended format when running on a z990
- * or later machine, otherwise fall back to basic format
- **/
+ * machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ * supporting extended format, otherwise fall back to
+ * basic format
+ */
enum cmb_format {
CMF_BASIC,
CMF_EXTENDED,
CMF_AUTODETECT = -1,
};
-/**
+
+/*
* format - actual format for all measurement blocks
*
* The format module parameter can be set to a value of 0 (zero)
@@ -105,20 +108,21 @@ module_param(format, bool, 0444);
* either with the help of a special pool or with kmalloc
* @free: free memory allocated with @alloc
* @set: enable or disable measurement
+ * @read: read a measurement entry at an index
* @readall: read a measurement block in a common format
* @reset: clear the data in the associated measurement block and
* reset its time stamp
* @align: align an allocated block so that the hardware can use it
*/
struct cmb_operations {
- int (*alloc) (struct ccw_device*);
- void(*free) (struct ccw_device*);
- int (*set) (struct ccw_device*, u32);
- u64 (*read) (struct ccw_device*, int);
- int (*readall)(struct ccw_device*, struct cmbdata *);
- void (*reset) (struct ccw_device*);
- void * (*align) (void *);
-
+ int (*alloc) (struct ccw_device *);
+ void (*free) (struct ccw_device *);
+ int (*set) (struct ccw_device *, u32);
+ u64 (*read) (struct ccw_device *, int);
+ int (*readall)(struct ccw_device *, struct cmbdata *);
+ void (*reset) (struct ccw_device *);
+ void *(*align) (void *);
+/* private: */
struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;
@@ -130,9 +134,11 @@ struct cmb_data {
unsigned long long last_update; /* when last_block was updated */
};
-/* our user interface is designed in terms of nanoseconds,
+/*
+ * Our user interface is designed in terms of nanoseconds,
* while the hardware measures total times in its own
- * unit.*/
+ * unit.
+ */
static inline u64 time_to_nsec(u32 value)
{
return ((u64)value) * 128000ull;
@@ -159,12 +165,13 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
return ret;
}
-/* activate or deactivate the channel monitor. When area is NULL,
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
* the monitor is deactivated. The channel monitor needs to
* be active in order to measure subchannels, which also need
- * to be enabled. */
-static inline void
-cmf_activate(void *area, unsigned int onoff)
+ * to be enabled.
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
{
register void * __gpr2 asm("2");
register long __gpr1 asm("1");
@@ -175,8 +182,8 @@ cmf_activate(void *area, unsigned int onoff)
asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
-static int
-set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+ unsigned long address)
{
int ret;
int retry;
@@ -466,6 +473,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
*
* @mem: pointer to CMBs (only in basic measurement mode)
* @list: contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
* @lock: protect concurrent access to @mem and @list
*/
struct cmb_area {
@@ -481,28 +489,36 @@ static struct cmb_area cmb_area = {
.num_channels = 1024,
};
-
/* ****** old style CMB handling ********/
-/** int maxchannels
- *
+/*
* Basic channel measurement blocks are allocated in one contiguous
* block of memory, which can not be moved as long as any channel
* is active. Therefore, a maximum number of subchannels needs to
* be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 KB of memory.
 * Current kernels don't allow kmalloc with more than 128 KB, so the
- * maximum is 4096
+ * maximum is 4096.
*/
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
/**
* struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
*
- * cmb as used by the hardware the fields are described in z/Architecture
- * Principles of Operation, chapter 17.
- * The area to be a contiguous array and may not be reallocated or freed.
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up from these blocks must be a contiguous array and may
+ * not be reallocated or freed.
* Only one cmb area can be present in the system.
*/
struct cmb {
@@ -516,8 +532,9 @@ struct cmb {
u32 reserved[2];
};
-/* insert a single device into the cmb_area list
- * called with cmb_area.lock held from alloc_cmb
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
*/
static int alloc_cmb_single(struct ccw_device *cdev,
struct cmb_data *cmb_data)
@@ -532,9 +549,11 @@ static int alloc_cmb_single(struct ccw_device *cdev,
goto out;
}
- /* find first unused cmb in cmb_area.mem.
- * this is a little tricky: cmb_area.list
- * remains sorted by ->cmb->hw_data pointers */
+ /*
+ * Find first unused cmb in cmb_area.mem.
+ * This is a little tricky: cmb_area.list
+ * remains sorted by ->cmb->hw_data pointers.
+ */
cmb = cmb_area.mem;
list_for_each_entry(node, &cmb_area.list, cmb_list) {
struct cmb_data *data;
@@ -558,8 +577,7 @@ out:
return ret;
}
-static int
-alloc_cmb (struct ccw_device *cdev)
+static int alloc_cmb(struct ccw_device *cdev)
{
int ret;
struct cmb *mem;
@@ -594,6 +612,9 @@ alloc_cmb (struct ccw_device *cdev)
free_pages((unsigned long)mem, get_order(size));
} else if (!mem) {
/* no luck */
+ printk(KERN_WARNING "cio: failed to allocate area "
+ "for measuring %d subchannels\n",
+ cmb_area.num_channels);
ret = -ENOMEM;
goto out;
} else {
@@ -667,7 +688,7 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 0, offset);
}
-static u64 read_cmb (struct ccw_device *cdev, int index)
+static u64 read_cmb(struct ccw_device *cdev, int index)
{
struct cmb *cmb;
u32 val;
@@ -717,7 +738,7 @@ out:
return ret;
}
-static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmb *cmb;
struct cmb_data *cmb_data;
@@ -790,14 +811,25 @@ static struct cmb_operations cmbops_basic = {
.align = align_cmb,
.attr_group = &cmf_attr_group,
};
-
+
/* ******** extended cmb handling ********/
/**
* struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
*
- * cmb as used by the hardware, may be in any 64 bit physical location,
- * the fields are described in z/Architecture Principles of Operation,
+ * The measurement block as used by the hardware. May be in any 64 bit physical
+ * location.
+ * The fields are described further in z/Architecture Principles of Operation,
* third edition, chapter 17.
*/
struct cmbe {
@@ -813,10 +845,12 @@ struct cmbe {
u32 reserved[7];
};
-/* kmalloc only guarantees 8 byte alignment, but we need cmbe
+/*
+ * kmalloc only guarantees 8 byte alignment, but we need cmbe
* pointers to be naturally aligned. Make sure to allocate
- * enough space for two cmbes */
-static inline struct cmbe* cmbe_align(struct cmbe *c)
+ * enough space for two cmbes.
+ */
+static inline struct cmbe *cmbe_align(struct cmbe *c)
{
unsigned long addr;
addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
@@ -824,7 +858,7 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
return (struct cmbe*)addr;
}
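
A brief sketch of how a caller obtains a naturally aligned block via cmbe_align(): allocate room for two cmbes and keep the original pointer for kfree(). This mirrors what alloc_cmbe() does below (its body is not shown in full in this hunk, so the GFP flags are an assumption):

static struct cmbe *example_get_aligned_cmbe(void **to_free)
{
	struct cmbe *mem;

	/* Room for two cmbes guarantees that one naturally aligned
	 * cmbe fits completely inside the buffer. */
	mem = kzalloc(2 * sizeof(struct cmbe), GFP_KERNEL | GFP_DMA);
	if (!mem)
		return NULL;
	*to_free = mem;		/* kfree() this pointer, not the aligned one */
	return cmbe_align(mem);
}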
-static int alloc_cmbe (struct ccw_device *cdev)
+static int alloc_cmbe(struct ccw_device *cdev)
{
struct cmbe *cmbe;
struct cmb_data *cmb_data;
@@ -870,7 +904,7 @@ out_free:
return ret;
}
-static void free_cmbe (struct ccw_device *cdev)
+static void free_cmbe(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
@@ -909,7 +943,7 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
}
-static u64 read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe(struct ccw_device *cdev, int index)
{
struct cmbe *cmb;
struct cmb_data *cmb_data;
@@ -967,7 +1001,7 @@ out:
return ret;
}
-static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmbe *cmb;
struct cmb_data *cmb_data;
@@ -1044,17 +1078,16 @@ static struct cmb_operations cmbops_extended = {
.align = align_cmbe,
.attr_group = &cmf_attr_group_ext,
};
-
-static ssize_t
-cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
return sprintf(buf, "%lld\n",
(unsigned long long) cmf_read(to_ccwdev(dev), idx));
}
-static ssize_t
-cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ccw_device *cdev;
long interval;
@@ -1076,8 +1109,9 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", interval);
}
-static ssize_t
-cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct cmbdata data;
u64 utilization;
@@ -1109,14 +1143,16 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
}
#define cmf_attr(name) \
-static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
-{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
-static DEVICE_ATTR(name, 0444, show_ ## name, NULL);
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
#define cmf_attr_avg(name) \
-static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
-{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
-static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);
+static ssize_t show_avg_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
@@ -1128,7 +1164,8 @@ cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);
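
For readers unpicking the token pasting above, cmf_attr(ssch_rsch_count) expands to roughly the following (derived directly from the macro definition):

static ssize_t show_ssch_rsch_count(struct device *dev,
				    struct device_attribute *attr, char *buf)
{ return cmb_show_attr((dev), buf, cmb_ssch_rsch_count); }
static DEVICE_ATTR(ssch_rsch_count, 0444, show_ssch_rsch_count, NULL);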
-static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+ NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
static struct attribute *cmf_attributes[] = {
@@ -1169,12 +1206,16 @@ static struct attribute_group cmf_attr_group_ext = {
.attrs = cmf_attributes_ext,
};
-static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
}
-static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c)
+static ssize_t cmb_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t c)
{
struct ccw_device *cdev;
int ret;
@@ -1185,12 +1226,12 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att
case '0':
ret = disable_cmf(cdev);
if (ret)
- printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
+ dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
break;
case '1':
ret = enable_cmf(cdev);
if (ret && ret != -EBUSY)
- printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
+ dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
break;
}
@@ -1199,9 +1240,16 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
-/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
-int
-enable_cmf(struct ccw_device *cdev)
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ * @cdev: The ccw device to be enabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
{
int ret;
@@ -1222,8 +1270,16 @@ enable_cmf(struct ccw_device *cdev)
return ret;
}
-int
-disable_cmf(struct ccw_device *cdev)
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ * @cdev: The ccw device to be disabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
{
int ret;
@@ -1235,14 +1291,32 @@ disable_cmf(struct ccw_device *cdev)
return ret;
}
-u64
-cmf_read(struct ccw_device *cdev, int index)
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev: the channel to be read
+ * @index: the index of the value to be read
+ *
+ * Returns the value read or %0 if the value cannot be read.
+ *
+ * Context:
+ * any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
{
return cmbops->read(cdev, index);
}
-int
-cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev: the channel to be read
+ * @data: a pointer to a data block that will be filled
+ *
+ * Returns %0 on success, a negative error value otherwise.
+ *
+ * Context:
+ * any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
return cmbops->readall(cdev, data);
}
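
To show how the interfaces documented above fit together, here is a minimal sketch (not part of this patch; the helper name is hypothetical) that enables measurement on a device, reads the accumulated data once and disables measurement again:

#include <asm/ccwdev.h>
#include <asm/cmb.h>

static int example_sample_cmf(struct ccw_device *cdev)
{
	struct cmbdata data;
	int ret;

	ret = enable_cmf(cdev);			/* non-atomic context */
	if (ret && ret != -EBUSY)
		return ret;
	ret = cmf_readall(cdev, &data);		/* may be called from any context */
	if (!ret)
		dev_info(&cdev->dev, "ssch/rsch count: %llu\n",
			 (unsigned long long) data.ssch_rsch_count);
	disable_cmf(cdev);			/* non-atomic context */
	return ret;
}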
@@ -1254,15 +1328,16 @@ int cmf_reenable(struct ccw_device *cdev)
return cmbops->set(cdev, 2);
}
-static int __init
-init_cmf(void)
+static int __init init_cmf(void)
{
char *format_string;
char *detect_string = "parameter";
- /* We cannot really autoprobe this. If the user did not give a parameter,
- see if we are running on z990 or up, otherwise fall back to basic mode. */
-
+ /*
+ * If the user did not give a parameter, see if we are running on a
+ * machine supporting extended measurement blocks, otherwise fall back
+ * to basic mode.
+ */
if (format == CMF_AUTODETECT) {
if (!css_characteristics_avail ||
!css_general_characteristics.ext_mb) {
@@ -1279,26 +1354,19 @@ init_cmf(void)
case CMF_BASIC:
format_string = "basic";
cmbops = &cmbops_basic;
- if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
- printk(KERN_ERR "Basic channel measurement facility"
- " can only use 1 to 4096 devices\n"
- KERN_ERR "when the cmf driver is built"
- " as a loadable module\n");
- return 1;
- }
break;
case CMF_EXTENDED:
- format_string = "extended";
+ format_string = "extended";
cmbops = &cmbops_extended;
break;
default:
- printk(KERN_ERR "Invalid format %d for channel "
+ printk(KERN_ERR "cio: Invalid format %d for channel "
"measurement facility\n", format);
return 1;
}
- printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
- format_string, detect_string);
+ printk(KERN_INFO "cio: Channel measurement facility using %s "
+ "format (%s)\n", format_string, detect_string);
return 0;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index dfca0ef139f..5d83dd47146 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/reboot.h>
#include "css.h"
#include "cio.h"
@@ -27,7 +28,7 @@ int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;
-struct channel_subsystem *css[__MAX_CSSID + 1];
+struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
int css_characteristics_avail = 0;
@@ -79,6 +80,7 @@ css_alloc_subchannel(struct subchannel_id schid)
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
ret = cio_modify(sch);
if (ret) {
+ kfree(sch->lock);
kfree(sch);
return ERR_PTR(ret);
}
@@ -109,7 +111,7 @@ css_subchannel_release(struct device *dev)
}
}
-int css_sch_device_register(struct subchannel *sch)
+static int css_sch_device_register(struct subchannel *sch)
{
int ret;
@@ -176,7 +178,7 @@ static int css_register_subchannel(struct subchannel *sch)
int ret;
/* Initialize the subchannel structure */
- sch->dev.parent = &css[0]->device;
+ sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = subch_attr_groups;
@@ -184,8 +186,8 @@ static int css_register_subchannel(struct subchannel *sch)
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
- printk (KERN_WARNING "%s: could not register %s\n",
- __func__, sch->dev.bus_id);
+ CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
return ret;
@@ -371,15 +373,12 @@ static int __init slow_subchannel_init(void)
spin_lock_init(&slow_subchannel_lock);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
- printk(KERN_WARNING "cio: could not allocate slow subchannel "
- "set\n");
+ CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
return -ENOMEM;
}
return 0;
}
-subsys_initcall(slow_subchannel_init);
-
static void css_slow_path_func(struct work_struct *unused)
{
struct subchannel_id schid;
@@ -425,8 +424,8 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
struct subchannel *sch;
int ret;
- CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
- schid.ssid, schid.sch_no);
+ CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
if (need_reprobe)
return -EAGAIN;
@@ -608,30 +607,55 @@ static int __init setup_css(int nr)
{
u32 tod_high;
int ret;
+ struct channel_subsystem *css;
- memset(css[nr], 0, sizeof(struct channel_subsystem));
- css[nr]->pseudo_subchannel =
- kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
- if (!css[nr]->pseudo_subchannel)
+ css = channel_subsystems[nr];
+ memset(css, 0, sizeof(struct channel_subsystem));
+ css->pseudo_subchannel =
+ kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
+ if (!css->pseudo_subchannel)
return -ENOMEM;
- css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
- css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
- sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
- ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+ css->pseudo_subchannel->dev.parent = &css->device;
+ css->pseudo_subchannel->dev.release = css_subchannel_release;
+ sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
+ ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
- kfree(css[nr]->pseudo_subchannel);
+ kfree(css->pseudo_subchannel);
return ret;
}
- mutex_init(&css[nr]->mutex);
- css[nr]->valid = 1;
- css[nr]->cssid = nr;
- sprintf(css[nr]->device.bus_id, "css%x", nr);
- css[nr]->device.release = channel_subsystem_release;
+ mutex_init(&css->mutex);
+ css->valid = 1;
+ css->cssid = nr;
+ sprintf(css->device.bus_id, "css%x", nr);
+ css->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
- css_generate_pgid(css[nr], tod_high);
+ css_generate_pgid(css, tod_high);
return 0;
}
+static int css_reboot_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ int ret, i;
+
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ if (css->cm_enabled)
+ if (chsc_secm(css, 0))
+ ret = NOTIFY_BAD;
+ }
+
+ return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+ .notifier_call = css_reboot_event,
+};
+
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
@@ -642,9 +666,20 @@ init_channel_subsystem (void)
{
int ret, i;
- if (chsc_determine_css_characteristics() == 0)
+ ret = chsc_determine_css_characteristics();
+ if (ret == -ENOMEM)
+ goto out; /* No need to continue. */
+ if (ret == 0)
css_characteristics_avail = 1;
+ ret = chsc_alloc_sei_area();
+ if (ret)
+ goto out;
+
+ ret = slow_subchannel_init();
+ if (ret)
+ goto out;
+
if ((ret = bus_register(&css_bus_type)))
goto out;
@@ -661,55 +696,71 @@ init_channel_subsystem (void)
}
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
- css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
- if (!css[i]) {
+ struct channel_subsystem *css;
+
+ css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+ if (!css) {
ret = -ENOMEM;
goto out_unregister;
}
+ channel_subsystems[i] = css;
ret = setup_css(i);
if (ret)
goto out_free;
- ret = device_register(&css[i]->device);
+ ret = device_register(&css->device);
if (ret)
goto out_free_all;
if (css_characteristics_avail &&
css_chsc_characteristics.secm) {
- ret = device_create_file(&css[i]->device,
+ ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
goto out_device;
}
- ret = device_register(&css[i]->pseudo_subchannel->dev);
+ ret = device_register(&css->pseudo_subchannel->dev);
if (ret)
goto out_file;
}
+ ret = register_reboot_notifier(&css_reboot_notifier);
+ if (ret)
+ goto out_pseudo;
css_init_done = 1;
ctl_set_bit(6, 28);
for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
+out_pseudo:
+ device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
out_file:
- device_remove_file(&css[i]->device, &dev_attr_cm_enable);
+ device_remove_file(&channel_subsystems[i]->device,
+ &dev_attr_cm_enable);
out_device:
- device_unregister(&css[i]->device);
+ device_unregister(&channel_subsystems[i]->device);
out_free_all:
- kfree(css[i]->pseudo_subchannel->lock);
- kfree(css[i]->pseudo_subchannel);
+ kfree(channel_subsystems[i]->pseudo_subchannel->lock);
+ kfree(channel_subsystems[i]->pseudo_subchannel);
out_free:
- kfree(css[i]);
+ kfree(channel_subsystems[i]);
out_unregister:
while (i > 0) {
+ struct channel_subsystem *css;
+
i--;
- device_unregister(&css[i]->pseudo_subchannel->dev);
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
if (css_characteristics_avail && css_chsc_characteristics.secm)
- device_remove_file(&css[i]->device,
+ device_remove_file(&css->device,
&dev_attr_cm_enable);
- device_unregister(&css[i]->device);
+ device_unregister(&css->device);
}
out_bus:
bus_unregister(&css_bus_type);
out:
+ chsc_free_sei_area();
+ kfree(slow_subchannel_set);
+ printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
+ ret);
return ret;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ed7977531c3..81215ef3243 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -139,7 +139,6 @@ struct css_driver {
*/
extern struct bus_type css_bus_type;
-extern int css_sch_device_register(struct subchannel *);
extern void css_sch_device_unregister(struct subchannel *);
extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
@@ -168,7 +167,7 @@ struct channel_subsystem {
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
extern struct bus_type css_bus_type;
-extern struct channel_subsystem *css[];
+extern struct channel_subsystem *channel_subsystems[];
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
@@ -192,6 +191,5 @@ int sch_is_pseudo_sch(struct subchannel *);
extern struct workqueue_struct *slow_path_wq;
-int subchannel_add_files (struct device *);
extern struct attribute_group *subch_attr_groups[];
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 001682e70f6..7ee57f084a8 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -21,6 +21,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h> /* HZ */
+#include <asm/cmb.h>
#include "cio.h"
#include "cio_debug.h"
@@ -78,45 +79,37 @@ static int snprint_alias(char *buf, size_t size,
/* Set up environment variables for ccw device uevent. Return 0 on success,
* non-zero otherwise. */
-static int ccw_uevent(struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
- int i = 0;
- int len = 0;
int ret;
char modalias_buf[30];
/* CU_TYPE= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "CU_TYPE=%04X", id->cu_type);
+ ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
if (ret)
return ret;
/* CU_MODEL= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "CU_MODEL=%02X", id->cu_model);
+ ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
if (ret)
return ret;
/* The next two can be zero, that's ok for us */
/* DEV_TYPE= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "DEV_TYPE=%04X", id->dev_type);
+ ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
if (ret)
return ret;
/* DEV_MODEL= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "DEV_MODEL=%02X", id->dev_model);
+ ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
if (ret)
return ret;
/* MODALIAS= */
snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "MODALIAS=%s", modalias_buf);
+ ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
return ret;
}
@@ -338,19 +331,34 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
rc = device_schedule_callback(&cdev->dev,
ccw_device_remove_orphan_cb);
if (rc)
- dev_info(&cdev->dev, "Couldn't unregister orphan\n");
+ CIO_MSG_EVENT(2, "Couldn't unregister orphan "
+ "0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return;
}
/* Deregister subchannel, which will kill the ccw device. */
rc = device_schedule_callback(cdev->dev.parent,
ccw_device_remove_sch_cb);
if (rc)
- dev_info(&cdev->dev,
- "Couldn't unregister disconnected device\n");
+ CIO_MSG_EVENT(2, "Couldn't unregister disconnected device "
+ "0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
-int
-ccw_device_set_offline(struct ccw_device *cdev)
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
{
int ret;
@@ -379,15 +387,28 @@ ccw_device_set_offline(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
cdev->online = 1;
}
return ret;
}
-int
-ccw_device_set_online(struct ccw_device *cdev)
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
@@ -402,8 +423,10 @@ ccw_device_set_online(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- pr_debug("ccw_device_online returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_online returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return ret;
}
if (cdev->private->state != DEV_STATE_ONLINE)
@@ -417,9 +440,11 @@ ccw_device_set_online(struct ccw_device *cdev)
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ else
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return (ret == 0) ? -ENODEV : ret;
}
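
A sketch (not part of this patch) of a typical caller of these two functions: a ccwgroup driver's set_online callback walking its slave devices. Names are hypothetical and error unwinding is omitted for brevity:

static int example_group_set_online(struct ccwgroup_device *gdev)
{
	int i, ret;

	for (i = 0; i < gdev->count; i++) {
		ret = ccw_device_set_online(gdev->cdev[i]);
		if (ret)
			return ret;	/* a real driver would roll back here */
	}
	return 0;
}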
@@ -439,9 +464,10 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
if (cdev->id.cu_type == 0) {
ret = ccw_device_recognition(cdev);
if (ret) {
- printk(KERN_WARNING"Couldn't start recognition "
- "for device %s (ret=%d)\n",
- cdev->dev.bus_id, ret);
+ CIO_MSG_EVENT(0, "Couldn't start recognition "
+ "for device 0.%x.%04x (ret=%d)\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
return ret;
}
wait_event(cdev->private->wait_q,
@@ -461,8 +487,8 @@ static void online_store_handle_online(struct ccw_device *cdev, int force)
if (force && cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_stlck(cdev);
if (ret) {
- printk(KERN_WARNING"ccw_device_stlck for device %s "
- "returned %d!\n", cdev->dev.bus_id, ret);
+ dev_warn(&cdev->dev,
+ "ccw_device_stlck returned %d!\n", ret);
return;
}
if (cdev->id.cu_type == 0)
@@ -893,8 +919,10 @@ io_subchannel_register(struct work_struct *work)
ret = device_reprobe(&cdev->dev);
if (ret)
/* We can't do much here. */
- dev_info(&cdev->dev, "device_reprobe() returned"
- " %d\n", ret);
+ CIO_MSG_EVENT(2, "device_reprobe() returned"
+ " %d for 0.%x.%04x\n", ret,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
goto out;
}
@@ -907,8 +935,9 @@ io_subchannel_register(struct work_struct *work)
/* make it known to the system */
ret = ccw_device_register(cdev);
if (ret) {
- printk (KERN_WARNING "%s: could not register %s\n",
- __func__, cdev->dev.bus_id);
+ CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
put_device(&cdev->dev);
spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
@@ -929,8 +958,7 @@ out:
wake_up(&ccw_device_init_wq);
}
-void
-ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
@@ -1083,6 +1111,7 @@ io_subchannel_probe (struct subchannel *sch)
* device, e.g. the console.
*/
cdev = sch->dev.driver_data;
+ cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
ccw_device_register(cdev);
/*
@@ -1308,8 +1337,19 @@ __ccwdev_check_busid(struct device *dev, void *id)
}
-struct ccw_device *
-get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id)
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the found device is increased
+ * and it is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+ const char *bus_id)
{
struct device *dev;
struct device_driver *drv;
@@ -1361,7 +1401,6 @@ ccw_device_remove (struct device *dev)
struct ccw_driver *cdrv = cdev->drv;
int ret;
- pr_debug("removing device %s\n", cdev->dev.bus_id);
if (cdrv->remove)
cdrv->remove(cdev);
if (cdev->online) {
@@ -1374,24 +1413,44 @@ ccw_device_remove (struct device *dev)
dev_fsm_final_state(cdev));
else
//FIXME: we can't fail!
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
return 0;
}
+static void ccw_device_shutdown(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ if (cdev->drv && cdev->drv->shutdown)
+ cdev->drv->shutdown(cdev);
+ disable_cmf(cdev);
+}
+
struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
.probe = ccw_device_probe,
.remove = ccw_device_remove,
+ .shutdown = ccw_device_shutdown,
};
-int
-ccw_driver_register (struct ccw_driver *cdriver)
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
{
struct device_driver *drv = &cdriver->driver;
@@ -1401,8 +1460,13 @@ ccw_driver_register (struct ccw_driver *cdriver)
return driver_register(drv);
}
-void
-ccw_driver_unregister (struct ccw_driver *cdriver)
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
{
driver_unregister(&cdriver->driver);
}
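
A compact sketch (not part of this patch) of the register/deregister pattern these wrappers support, together with a lookup via get_ccwdev_by_busid(); the driver name, bus id and callbacks are hypothetical, and a real driver would also fill in .ids, .set_online and .set_offline:

#include <linux/module.h>
#include <asm/ccwdev.h>

static int example_probe(struct ccw_device *cdev)
{
	return 0;
}

static void example_remove(struct ccw_device *cdev)
{
}

static struct ccw_driver example_driver = {
	.owner	= THIS_MODULE,
	.name	= "example",
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	struct ccw_device *cdev;
	int ret;

	ret = ccw_driver_register(&example_driver);
	if (ret)
		return ret;
	/* Look up one of our devices by bus id (hypothetical id). */
	cdev = get_ccwdev_by_busid(&example_driver, "0.0.1234");
	if (cdev)
		put_device(&cdev->dev);	/* drop the reference taken above */
	return 0;
}

static void __exit example_exit(void)
{
	ccw_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");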
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index b66338b7657..0d408960043 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,7 +80,6 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_cancel_halt_clear(struct ccw_device *);
void ccw_device_do_unreg_rereg(struct work_struct *);
-void ccw_device_call_sch_unregister(struct work_struct *);
void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 6bba8092957..8867443b806 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -268,7 +268,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : unknown device %04x on subchannel "
+ "cio: SenseID : unknown device %04x on subchannel "
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
@@ -293,7 +293,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
return;
}
/* Issue device info message. */
- CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
+ CIO_DEBUG(KERN_INFO, 2,
+ "cio: SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
"%04X/%02X\n",
cdev->private->dev_id.ssid,
@@ -303,7 +304,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : boxed device %04x on subchannel "
+ "cio: SenseID : boxed device %04x on subchannel "
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
@@ -388,7 +389,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
- "Boxed device %04x on subchannel %04x\n",
+ "cio: Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
@@ -445,7 +446,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
if (cdev->private->pgid[last].inf.ps.state1 ==
SNID_STATE1_RESET)
/* No previous pgid found */
- memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
+ memcpy(&cdev->private->pgid[0],
+ &channel_subsystems[0]->global_pgid,
sizeof(struct pgid));
else
/* Use existing pgid */
@@ -542,51 +544,6 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
}
-static void
-ccw_device_nopath_notify(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct subchannel *sch;
- int ret;
- unsigned long flags;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- spin_lock_irqsave(cdev->ccwlock, flags);
- sch = to_subchannel(cdev->dev.parent);
- /* Extra sanity. */
- if (sch->lpm)
- goto out_unlock;
- if (sch->driver && sch->driver->notify) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
- spin_lock_irqsave(cdev->ccwlock, flags);
- } else
- ret = 0;
- if (!ret) {
- if (get_device(&sch->dev)) {
- /* Driver doesn't want to keep device. */
- cio_disable_subchannel(sch);
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work,
- &cdev->private->kick_work);
- } else
- put_device(&sch->dev);
- }
- } else {
- cio_disable_subchannel(sch);
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- }
-out_unlock:
- spin_unlock_irqrestore(cdev->ccwlock, flags);
-}
-
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
@@ -630,12 +587,9 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- if (cdev->online) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
+ if (cdev->online)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -689,11 +643,7 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
break;
default:
cdev->private->flags.donotify = 0;
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -764,59 +714,16 @@ ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
}
/*
- * Handle not operational event while offline.
+ * Handle not operational event in non-special state.
*/
-static void
-ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
struct subchannel *sch;
cdev->private->state = DEV_STATE_NOT_OPER;
sch = to_subchannel(cdev->dev.parent);
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
- wake_up(&cdev->private->wait_q);
-}
-
-/*
- * Handle not operational event while online.
- */
-static void
-ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- if (sch->driver->notify) {
- spin_unlock_irq(cdev->ccwlock);
- ret = sch->driver->notify(&sch->dev,
- sch->lpm ? CIO_GONE : CIO_NO_PATH);
- spin_lock_irq(cdev->ccwlock);
- } else
- ret = 0;
- if (ret) {
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- return;
- }
- cdev->private->state = DEV_STATE_NOT_OPER;
- cio_disable_subchannel(sch);
- if (sch->schib.scsw.actl != 0) {
- // FIXME: not-oper indication to device driver ?
- ccw_device_call_handler(cdev);
- }
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
- wake_up(&cdev->private->wait_q);
+ css_schedule_eval(sch->schid);
}
/*
@@ -914,18 +821,9 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV) {
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- } else if (cdev->handler)
+ if (ret == -ENODEV)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
@@ -946,9 +844,10 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
- printk(KERN_INFO "Huh? %s(%s): unsolicited "
- "interrupt...\n",
- __FUNCTION__, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
+ "interrupt during w4sense...\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
if (cdev->handler)
cdev->handler (cdev, 0, irb);
}
@@ -1215,8 +1114,8 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
- printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
- cdev->private->state, dev_event);
+ CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
+ cdev->private->state, dev_event);
BUG();
}
@@ -1231,7 +1130,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
@@ -1243,50 +1142,50 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_W4SENSE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
- [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
[DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
/* states to wait for i/o completion before doing something */
[DEV_STATE_CLEAR_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_TIMEOUT_KILL] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 60b9347f7c9..f232832f2b2 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -17,6 +17,7 @@
#include <asm/delay.h>
#include <asm/cio.h>
#include <asm/lowcore.h>
+#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
@@ -25,51 +26,6 @@
#include "ioasm.h"
/*
- * diag210 is used under VM to get information about a virtual device
- */
-int
-diag210(struct diag210 * addr)
-{
- /*
- * diag 210 needs its data below the 2GB border, so we
- * use a static data area to be sure
- */
- static struct diag210 diag210_tmp;
- static DEFINE_SPINLOCK(diag210_lock);
- unsigned long flags;
- int ccode;
-
- spin_lock_irqsave(&diag210_lock, flags);
- diag210_tmp = *addr;
-
-#ifdef CONFIG_64BIT
- asm volatile(
- " lhi %0,-1\n"
- " sam31\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1: sam64\n"
- EX_TABLE(0b,1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#else
- asm volatile(
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#endif
-
- *addr = diag210_tmp;
- spin_unlock_irqrestore(&diag210_lock, flags);
-
- return ccode;
-}
-
-/*
* Input :
* devno - device number
* ps - pointer to sense ID data area
@@ -349,5 +305,3 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
break;
}
}
-
-EXPORT_SYMBOL(diag210);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a5d263fb55a..7fd2dadc329 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -25,6 +25,16 @@
#include "device.h"
#include "chp.h"
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ * %0 on success, -%EINVAL on an invalid flag combination.
+ */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
/*
@@ -40,6 +50,15 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
return 0;
}
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, the remainder is left untouched.
+ * Returns:
+ * %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
/*
@@ -59,6 +78,13 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
return 0;
}
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared; the remainder is left untouched.
+ */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
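As a usage sketch (not taken from this patch), the three option helpers documented above are typically called from a driver's probe or set_online routine. A minimal sketch, assuming the usual <asm/ccwdev.h> declarations; the function name and the particular flag choice are hypothetical, only CCWDEV_EARLY_NOTIFICATION and CCWDEV_ALLOW_FORCE are taken from the code in this hunk:

/* Hypothetical probe callback of a CCW device driver (sketch). */
static int sample_probe(struct ccw_device *cdev)
{
        int rc;

        /* Request early interrupt notification and allow forced onlining. */
        rc = ccw_device_set_options(cdev, CCWDEV_EARLY_NOTIFICATION |
                                          CCWDEV_ALLOW_FORCE);
        if (rc)
                return rc;      /* -EINVAL: invalid flag combination */

        /* Drop the force option again once it is no longer needed. */
        ccw_device_clear_options(cdev, CCWDEV_ALLOW_FORCE);
        return 0;
}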
@@ -67,8 +93,22 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}
-int
-ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ * outstanding, otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
@@ -89,10 +129,33 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
return ret;
}
-int
-ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, __u8 key,
- unsigned long flags)
+/**
+ * ccw_device_start_key() - start an s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags)
{
struct subchannel *sch;
int ret;
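As a usage sketch (not taken from this patch), a driver would start a channel program roughly as follows. sample_start_io() and the command code 0x02 are hypothetical; struct ccw1, CCW_FLAG_SLI, GFP_DMA allocation, __pa() and get_ccwdev_lock() mirror the (removed) read_dev_chars() code further down in this diff, and the return codes are those documented above:

static int sample_start_io(struct ccw_device *cdev, void *buf, int len)
{
        struct ccw1 *ccw;
        unsigned long flags;
        int rc;

        /* CCW and data buffer must be 31-bit addressable, hence GFP_DMA;
         * buf is assumed to have been allocated accordingly. */
        ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!ccw)
                return -ENOMEM;
        ccw->cmd_code = 0x02;           /* hypothetical read command */
        ccw->flags = CCW_FLAG_SLI;
        ccw->count = len;
        ccw->cda = (__u32) __pa(buf);

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        /* lpm == 0: let cio pick a path from the opm. */
        rc = ccw_device_start_key(cdev, ccw, (unsigned long) ccw, 0,
                                  PAGE_DEFAULT_KEY, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        if (rc)                 /* -EBUSY, -EACCES or -ENODEV, see above */
                kfree(ccw);
        /* On success the interrupt handler frees the CCW via intparm. */
        return rc;
}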
@@ -135,11 +198,38 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return ret;
}
-
-int
-ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, __u8 key,
- unsigned long flags, int expires)
+/**
+ * ccw_device_start_timeout_key() - start an s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags, int expires)
{
int ret;
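The timeout case shows up in the driver's interrupt handler. A sketch of how it could be told apart from a regular interrupt; the handler name is hypothetical, while the ERR_PTR(-ETIMEDOUT) convention and the handler signature are those used elsewhere in this file:

static void sample_irq_handler(struct ccw_device *cdev, unsigned long intparm,
                               struct irb *irb)
{
        /* On timeout (and other internal errors) cio hands in an error
         * pointer instead of a real irb. */
        if (IS_ERR(irb)) {
                if (PTR_ERR(irb) == -ETIMEDOUT) {
                        /* Channel program was terminated; clean up the
                         * request identified by intparm. */
                }
                return;
        }
        /* Regular interrupt: inspect irb->scsw and complete the request. */
}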
@@ -152,18 +242,67 @@ ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return ret;
}
-int
-ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, unsigned long flags)
+/**
+ * ccw_device_start() - start an s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags)
{
return ccw_device_start_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags);
}
-int
-ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, unsigned long flags,
- int expires)
+/**
+ * ccw_device_start_timeout() - start an s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm,
+ unsigned long flags, int expires)
{
return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags,
@@ -171,8 +310,23 @@ ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
}
-int
-ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ * outstanding, otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
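To cancel an outstanding request a driver would, under the same locking rules, usually try hsch via ccw_device_halt() first and escalate to csch via ccw_device_clear(). A sketch; the escalation policy and function name are assumptions, the calls and return codes are the documented ones:

static int sample_cancel_io(struct ccw_device *cdev, unsigned long intparm)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        rc = ccw_device_halt(cdev, intparm);
        if (rc == -EBUSY)               /* halt not possible right now */
                rc = ccw_device_clear(cdev, intparm);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return rc;
}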
@@ -193,8 +347,20 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
return ret;
}
-int
-ccw_device_resume(struct ccw_device *cdev)
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
@@ -260,11 +426,21 @@ ccw_device_call_handler(struct ccw_device *cdev)
return 1;
}
-/*
- * Search for CIW command in extended sense data.
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ * %NULL if no extended sense data has been stored or if no CIW of the
+ * specified command type could be found,
+ * else a pointer to the CIW of the specified command type.
*/
-struct ciw *
-ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
int ciw_cnt;
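The removed read_conf_data_lpm() further down shows the intended use: look up the RCD command advertised in the sense-ID data and build a CCW from it. A driver doing this on its own could proceed as in the following sketch; the function name and buffer handling are assumptions, CIW_TYPE_RCD and the ciw->cmd/ciw->count fields are taken from the code below:

static int sample_build_rcd_ccw(struct ccw_device *cdev, struct ccw1 *ccw,
                                void **buf)
{
        struct ciw *ciw;

        /* RCD support is advertised through a CIW in the sense-ID data. */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        *buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!*buf)
                return -ENOMEM;
        ccw->cmd_code = ciw->cmd;
        ccw->count = ciw->count;
        ccw->flags = CCW_FLAG_SLI;
        ccw->cda = (__u32) __pa(*buf);
        return 0;
}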
@@ -276,8 +452,14 @@ ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
return NULL;
}
-__u8
-ccw_device_get_path_mask(struct ccw_device *cdev)
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ * %0 if no subchannel for the device is available,
+ * else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
@@ -288,253 +470,6 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
return sch->lpm;
}
-static void
-ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
-{
- if (!ip)
- /* unsolicited interrupt */
- return;
-
- /* Abuse intparm for error reporting. */
- if (IS_ERR(irb))
- cdev->private->intparm = -EIO;
- else if (irb->scsw.cc == 1)
- /* Retry for deferred condition code. */
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat !=
- (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
- (irb->scsw.cstat != 0)) {
- /*
- * We didn't get channel end / device end. Check if path
- * verification has been started; we can retry after it has
- * finished. We also retry unit checks except for command reject
- * or intervention required. Also check for long busy
- * conditions.
- */
- if (cdev->private->flags.doverify ||
- cdev->private->state == DEV_STATE_VERIFY)
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
- !(irb->ecw[0] &
- (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
- (irb->scsw.dstat & DEV_STAT_DEV_END) &&
- (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
- cdev->private->intparm = -EAGAIN;
- else
- cdev->private->intparm = -EIO;
-
- } else
- cdev->private->intparm = 0;
- wake_up(&cdev->private->wait_q);
-}
-
-static int
-__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
-{
- int ret;
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- do {
- ccw_device_set_timeout(cdev, 60 * HZ);
- ret = cio_start (sch, ccw, lpm);
- if (ret != 0)
- ccw_device_set_timeout(cdev, 0);
- if (ret == -EBUSY) {
- /* Try again later. */
- spin_unlock_irq(sch->lock);
- msleep(10);
- spin_lock_irq(sch->lock);
- continue;
- }
- if (ret != 0)
- /* Non-retryable error. */
- break;
- /* Wait for end of request. */
- cdev->private->intparm = magic;
- spin_unlock_irq(sch->lock);
- wait_event(cdev->private->wait_q,
- (cdev->private->intparm == -EIO) ||
- (cdev->private->intparm == -EAGAIN) ||
- (cdev->private->intparm == 0));
- spin_lock_irq(sch->lock);
- /* Check at least for channel end / device end */
- if (cdev->private->intparm == -EIO) {
- /* Non-retryable error. */
- ret = -EIO;
- break;
- }
- if (cdev->private->intparm == 0)
- /* Success. */
- break;
- /* Try again later. */
- spin_unlock_irq(sch->lock);
- msleep(10);
- spin_lock_irq(sch->lock);
- } while (1);
-
- return ret;
-}
-
-/**
- * read_dev_chars() - read device characteristics
- * @param cdev target ccw device
- * @param buffer pointer to buffer for rdc data
- * @param length size of rdc data
- * @returns 0 for success, negative error value on failure
- *
- * Context:
- * called for online device, lock not held
- **/
-int
-read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
-{
- void (*handler)(struct ccw_device *, unsigned long, struct irb *);
- struct subchannel *sch;
- int ret;
- struct ccw1 *rdc_ccw;
-
- if (!cdev)
- return -ENODEV;
- if (!buffer || !length)
- return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT (4, "rddevch");
- CIO_TRACE_EVENT (4, sch->dev.bus_id);
-
- rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!rdc_ccw)
- return -ENOMEM;
- rdc_ccw->cmd_code = CCW_CMD_RDC;
- rdc_ccw->count = length;
- rdc_ccw->flags = CCW_FLAG_SLI;
- ret = set_normalized_cda (rdc_ccw, (*buffer));
- if (ret != 0) {
- kfree(rdc_ccw);
- return ret;
- }
-
- spin_lock_irq(sch->lock);
- /* Save interrupt handler. */
- handler = cdev->handler;
- /* Temporarily install own handler. */
- cdev->handler = ccw_device_wake_up;
- if (cdev->private->state != DEV_STATE_ONLINE)
- ret = -ENODEV;
- else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
- ret = -EBUSY;
- else
- /* 0x00D9C4C3 == ebcdic "RDC" */
- ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);
-
- /* Restore interrupt handler. */
- cdev->handler = handler;
- spin_unlock_irq(sch->lock);
-
- clear_normalized_cda (rdc_ccw);
- kfree(rdc_ccw);
-
- return ret;
-}
-
-/*
- * Read Configuration data using path mask
- */
-int
-read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
-{
- void (*handler)(struct ccw_device *, unsigned long, struct irb *);
- struct subchannel *sch;
- struct ciw *ciw;
- char *rcd_buf;
- int ret;
- struct ccw1 *rcd_ccw;
-
- if (!cdev)
- return -ENODEV;
- if (!buffer || !length)
- return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT (4, "rdconf");
- CIO_TRACE_EVENT (4, sch->dev.bus_id);
-
- /*
- * scan for RCD command in extended SenseID data
- */
- ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
- if (!ciw || ciw->cmd == 0)
- return -EOPNOTSUPP;
-
- /* Adjust requested path mask to excluded varied off paths. */
- if (lpm) {
- lpm &= sch->opm;
- if (lpm == 0)
- return -EACCES;
- }
-
- rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!rcd_ccw)
- return -ENOMEM;
- rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
- if (!rcd_buf) {
- kfree(rcd_ccw);
- return -ENOMEM;
- }
- rcd_ccw->cmd_code = ciw->cmd;
- rcd_ccw->cda = (__u32) __pa (rcd_buf);
- rcd_ccw->count = ciw->count;
- rcd_ccw->flags = CCW_FLAG_SLI;
-
- spin_lock_irq(sch->lock);
- /* Save interrupt handler. */
- handler = cdev->handler;
- /* Temporarily install own handler. */
- cdev->handler = ccw_device_wake_up;
- if (cdev->private->state != DEV_STATE_ONLINE)
- ret = -ENODEV;
- else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
- ret = -EBUSY;
- else
- /* 0x00D9C3C4 == ebcdic "RCD" */
- ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);
-
- /* Restore interrupt handler. */
- cdev->handler = handler;
- spin_unlock_irq(sch->lock);
-
- /*
- * on success we update the user input parms
- */
- if (ret) {
- kfree (rcd_buf);
- *buffer = NULL;
- *length = 0;
- } else {
- *length = ciw->count;
- *buffer = rcd_buf;
- }
- kfree(rcd_ccw);
-
- return ret;
-}
-
-/*
- * Read Configuration data
- */
-int
-read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
-{
- return read_conf_data_lpm (cdev, buffer, length, 0);
-}
-
/*
* Try to break the lock on a boxed device.
*/
@@ -604,8 +539,7 @@ out_unlock:
return ret;
}
-void *
-ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
struct subchannel *sch;
struct chp_id chpid;
@@ -635,12 +569,6 @@ _ccw_device_get_subchannel_number(struct ccw_device *cdev)
return cdev->private->schid.sch_no;
}
-int
-_ccw_device_get_device_number(struct ccw_device *cdev)
-{
- return cdev->private->dev_id.devno;
-}
-
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -655,9 +583,5 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(read_conf_data);
-EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
-EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
-EXPORT_SYMBOL_GPL(read_conf_data_lpm);
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index ed026a1dc32..40a3208c7cf 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -81,6 +81,7 @@ static __u32 volatile spare_indicator;
static atomic_t spare_indicator_usecount;
#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
static mempool_t *qdio_mempool_scssc;
+static struct kmem_cache *qdio_q_cache;
static debug_info_t *qdio_dbf_setup;
static debug_info_t *qdio_dbf_sbal;
@@ -194,6 +195,8 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
again:
ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
+ if ((ccq == 96) && (tmp_cnt != *cnt))
+ rc = 0;
if (rc == 1) {
QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
goto again;
@@ -739,7 +742,8 @@ qdio_get_outbound_buffer_frontier(struct qdio_q *q)
first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
(QDIO_MAX_BUFFERS_PER_Q-1));
- if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
+ if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
+ (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
SYNC_MEMORY;
check_next:
@@ -1020,9 +1024,9 @@ __qdio_outbound_processing(struct qdio_q *q)
}
static void
-qdio_outbound_processing(struct qdio_q *q)
+qdio_outbound_processing(unsigned long q)
{
- __qdio_outbound_processing(q);
+ __qdio_outbound_processing((struct qdio_q *) q);
}
/************************* INBOUND ROUTINES *******************************/
@@ -1445,9 +1449,10 @@ out:
}
static void
-tiqdio_inbound_processing(struct qdio_q *q)
+tiqdio_inbound_processing(unsigned long q)
{
- __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
+ __tiqdio_inbound_processing((struct qdio_q *) q,
+ atomic_read(&spare_indicator_usecount));
}
static void
@@ -1490,9 +1495,9 @@ again:
}
static void
-qdio_inbound_processing(struct qdio_q *q)
+qdio_inbound_processing(unsigned long q)
{
- __qdio_inbound_processing(q);
+ __qdio_inbound_processing((struct qdio_q *) q);
}
/************************* MAIN ROUTINES *******************************/
@@ -1617,23 +1622,21 @@ static void
qdio_release_irq_memory(struct qdio_irq *irq_ptr)
{
int i;
+ struct qdio_q *q;
- for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
- if (!irq_ptr->input_qs[i])
- goto next;
-
- kfree(irq_ptr->input_qs[i]->slib);
- kfree(irq_ptr->input_qs[i]);
-
-next:
- if (!irq_ptr->output_qs[i])
- continue;
-
- kfree(irq_ptr->output_qs[i]->slib);
- kfree(irq_ptr->output_qs[i]);
-
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->input_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ q = irq_ptr->output_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
}
- kfree(irq_ptr->qdr);
+ free_page((unsigned long) irq_ptr->qdr);
free_page((unsigned long) irq_ptr);
}
@@ -1680,44 +1683,35 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr,
{
int i;
struct qdio_q *q;
- int result=-ENOMEM;
-
- for (i=0;i<no_input_qs;i++) {
- q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
- if (!q) {
- QDIO_PRINT_ERR("kmalloc of q failed!\n");
- goto out;
- }
+ for (i = 0; i < no_input_qs; i++) {
+ q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ memset(q, 0, sizeof(*q));
- q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
- QDIO_PRINT_ERR("kmalloc of slib failed!\n");
- goto out;
+ kmem_cache_free(qdio_q_cache, q);
+ return -ENOMEM;
}
-
irq_ptr->input_qs[i]=q;
}
- for (i=0;i<no_output_qs;i++) {
- q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
-
- if (!q) {
- goto out;
- }
+ for (i = 0; i < no_output_qs; i++) {
+ q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ memset(q, 0, sizeof(*q));
- q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
+ q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
- QDIO_PRINT_ERR("kmalloc of slib failed!\n");
- goto out;
+ kmem_cache_free(qdio_q_cache, q);
+ return -ENOMEM;
}
-
irq_ptr->output_qs[i]=q;
}
-
- result=0;
-out:
- return result;
+ return 0;
}
static void
@@ -1767,12 +1761,15 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->handler=input_handler;
q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
- q->tasklet.data=(unsigned long)q;
/* q->is_thinint_q isn't valid at this time, but
- * irq_ptr->is_thinint_irq is */
- q->tasklet.func=(void(*)(unsigned long))
- ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
- &qdio_inbound_processing);
+ * irq_ptr->is_thinint_irq is
+ */
+ if (irq_ptr->is_thinint_irq)
+ tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+ (unsigned long) q);
+ else
+ tasklet_init(&q->tasklet, qdio_inbound_processing,
+ (unsigned long) q);
/* actually this is not used for inbound queues. yet. */
atomic_set(&q->busy_siga_counter,0);
@@ -1843,13 +1840,10 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->last_move_ftc=0;
q->handler=output_handler;
- q->tasklet.data=(unsigned long)q;
- q->tasklet.func=(void(*)(unsigned long))
- &qdio_outbound_processing;
- q->timer.function=(void(*)(unsigned long))
- &qdio_outbound_processing;
- q->timer.data = (long)q;
- init_timer(&q->timer);
+ tasklet_init(&q->tasklet, qdio_outbound_processing,
+ (unsigned long) q);
+ setup_timer(&q->timer, qdio_outbound_processing,
+ (unsigned long) q);
atomic_set(&q->busy_siga_counter,0);
q->timing.busy_start=0;
@@ -2985,17 +2979,17 @@ qdio_allocate(struct qdio_initialize *init_data)
QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
if (!irq_ptr) {
- QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
+ QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
return -ENOMEM;
}
init_MUTEX(&irq_ptr->setting_up_sema);
/* QDR must be in DMA area since CCW data address is only 32 bit */
- irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
+ irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!(irq_ptr->qdr)) {
free_page((unsigned long) irq_ptr);
- QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
+ QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
return -ENOMEM;
}
QDIO_DBF_TEXT0(0,setup,"qdr:");
@@ -3004,6 +2998,7 @@ qdio_allocate(struct qdio_initialize *init_data)
if (qdio_alloc_qs(irq_ptr,
init_data->no_input_qs,
init_data->no_output_qs)) {
+ QDIO_PRINT_ERR("queue allocation failed!\n");
qdio_release_irq_memory(irq_ptr);
return -ENOMEM;
}
@@ -3732,7 +3727,7 @@ qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count
#endif /* CONFIG_64BIT */
}
} else {
- QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
+ QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
return -EINVAL;
}
return count;
@@ -3895,9 +3890,19 @@ init_QDIO(void)
if (res)
return res;
+ qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+ 256, 0, NULL);
+ if (!qdio_q_cache) {
+ qdio_release_qdio_memory();
+ return -ENOMEM;
+ }
+
res = qdio_register_dbf_views();
- if (res)
+ if (res) {
+ kmem_cache_destroy(qdio_q_cache);
+ qdio_release_qdio_memory();
return res;
+ }
QDIO_DBF_TEXT0(0,setup,"initQDIO");
res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
@@ -3929,6 +3934,7 @@ cleanup_QDIO(void)
qdio_release_qdio_memory();
qdio_unregister_dbf_views();
mempool_destroy(qdio_mempool_scssc);
+ kmem_cache_destroy(qdio_q_cache);
bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
printk("qdio: %s: module removed\n",version);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 90bd2201451..67aaff3e668 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -458,28 +458,22 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* uevent function for AP devices. It sets up a single environment
* variable DEV_TYPE which contains the hardware device type.
*/
-static int ap_uevent (struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- int retval = 0, length = 0, i = 0;
+ int retval = 0;
if (!ap_dev)
return -ENODEV;
/* Set up DEV_TYPE environment variable. */
- retval = add_uevent_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "DEV_TYPE=%04X", ap_dev->device_type);
+ retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
if (retval)
return retval;
/* Add MODALIAS= */
- retval = add_uevent_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "MODALIAS=ap:t%02X", ap_dev->device_type);
+ retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
- envp[i] = NULL;
return retval;
}
@@ -1231,8 +1225,9 @@ static void ap_reset_domain(void)
{
int i;
- for (i = 0; i < AP_DEVICES; i++)
- ap_reset_queue(AP_MKQID(i, ap_domain_index));
+ if (ap_domain_index != -1)
+ for (i = 0; i < AP_DEVICES; i++)
+ ap_reset_queue(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 2a9349ad68b..44253fdd413 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -45,7 +45,7 @@
/**
* The module initialization code.
*/
-int __init zcrypt_init(void)
+static int __init zcrypt_init(void)
{
int rc;
@@ -86,7 +86,7 @@ out:
/**
* The module termination code.
*/
-void __exit zcrypt_exit(void)
+static void __exit zcrypt_exit(void)
{
zcrypt_cex2a_exit();
zcrypt_pcixcc_exit();
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 64948788d30..70b9ddc8cf9 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -277,7 +277,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
};
struct {
struct type6_hdr hdr;
- struct ica_CPRBX cprbx;
+ struct CPRBX cprbx;
} __attribute__((packed)) *msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
@@ -432,14 +432,17 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
}
if (service_rc == 8 && service_rs == 770) {
PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
- return -EAGAIN;
+ return -EINVAL;
}
if (service_rc == 8 && service_rs == 783) {
PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
return -EAGAIN;
}
+ if (service_rc == 12 && service_rs == 769) {
+ PDEBUG("Invalid key on PCIXCC/CEX2C\n");
+ return -EINVAL;
+ }
PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
service_rc, service_rs);
zdev->online = 0;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index a78ff307fd1..8cb7d7a6973 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -28,51 +28,6 @@
#ifndef _ZCRYPT_PCIXCC_H_
#define _ZCRYPT_PCIXCC_H_
-/**
- * CPRBX
- * Note that all shorts and ints are big-endian.
- * All pointer fields are 16 bytes long, and mean nothing.
- *
- * A request CPRB is followed by a request_parameter_block.
- *
- * The request (or reply) parameter block is organized thus:
- * function code
- * VUD block
- * key block
- */
-struct CPRBX {
- unsigned short cprb_len; /* CPRB length 220 */
- unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
- unsigned char pad_000[3]; /* Alignment pad bytes */
- unsigned char func_id[2]; /* function id 0x5432 */
- unsigned char cprb_flags[4]; /* Flags */
- unsigned int req_parml; /* request parameter buffer len */
- unsigned int req_datal; /* request data buffer */
- unsigned int rpl_msgbl; /* reply message block length */
- unsigned int rpld_parml; /* replied parameter block len */
- unsigned int rpl_datal; /* reply data block len */
- unsigned int rpld_datal; /* replied data block len */
- unsigned int req_extbl; /* request extension block len */
- unsigned char pad_001[4]; /* reserved */
- unsigned int rpld_extbl; /* replied extension block len */
- unsigned char req_parmb[16]; /* request parm block 'address' */
- unsigned char req_datab[16]; /* request data block 'address' */
- unsigned char rpl_parmb[16]; /* reply parm block 'address' */
- unsigned char rpl_datab[16]; /* reply data block 'address' */
- unsigned char req_extb[16]; /* request extension block 'addr'*/
- unsigned char rpl_extb[16]; /* reply extension block 'addres'*/
- unsigned short ccp_rtcode; /* server return code */
- unsigned short ccp_rscode; /* server reason code */
- unsigned int mac_data_len; /* Mac Data Length */
- unsigned char logon_id[8]; /* Logon Identifier */
- unsigned char mac_value[8]; /* Mac Value */
- unsigned char mac_content_flgs;/* Mac content flag byte */
- unsigned char pad_002; /* Alignment */
- unsigned short domain; /* Domain */
- unsigned char pad_003[12]; /* Domain masks */
- unsigned char pad_004[36]; /* reserved */
-} __attribute__((packed));
-
int zcrypt_pcixcc_init(void);
void zcrypt_pcixcc_exit(void);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 348bb7b8277..399695f7b1a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -317,8 +317,8 @@ claw_probe(struct ccwgroup_device *cgdev)
CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
return -ENOMEM;
}
- privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
- privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
+ privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+ privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
probe_error(cgdev);
put_device(&cgdev->dev);
@@ -327,8 +327,6 @@ claw_probe(struct ccwgroup_device *cgdev)
CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
return -ENOMEM;
}
- memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
- memset(privptr->p_env, 0x00, sizeof(struct claw_env));
memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
@@ -3893,7 +3891,6 @@ claw_init_netdevice(struct net_device * dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 1300;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
#endif
@@ -3924,7 +3921,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
ccw_device_get_id(cdev, &dev_id);
p_ch->devno = dev_id.devno;
- if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+ if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "%s Out of memory in %s for irb\n",
p_ch->id,__FUNCTION__);
#ifdef FUNCTRACE
@@ -3933,7 +3930,6 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
#endif
return -ENOMEM;
}
- memset(p_ch->irb, 0, sizeof (struct irb));
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
cdev->dev.bus_id,__FUNCTION__,__LINE__);
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b20fd068173..44993723373 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -674,7 +674,7 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
int first = 1;
int i;
unsigned long duration;
- struct timespec done_stamp = xtime;
+ struct timespec done_stamp = current_kernel_time();
DBF_TEXT(trace, 4, __FUNCTION__);
@@ -730,7 +730,7 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
spin_unlock(&ch->collect_lock);
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
- ch->prof.send_stamp = xtime;
+ ch->prof.send_stamp = current_kernel_time();
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long) ch, 0xff, 0);
ch->prof.doios_multi++;
@@ -2281,7 +2281,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
fsm_newstate(ch->fsm, CH_STATE_TX);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- ch->prof.send_stamp = xtime;
+ ch->prof.send_stamp = current_kernel_time();
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
(unsigned long) ch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
@@ -2823,7 +2823,6 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
return dev;
}
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 08a994fdd1a..0fd663b23d7 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1400,11 +1400,14 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n",
cdev->dev.bus_id, dstat, cstat);
if (rc) {
- lcs_schedule_recovery(card);
- wake_up(&card->wait_q);
- return;
+ channel->state = LCS_CH_STATE_ERROR;
}
}
+ if (channel->state == LCS_CH_STATE_ERROR) {
+ lcs_schedule_recovery(card);
+ wake_up(&card->wait_q);
+ return;
+ }
/* How far in the ccw chain have we processed? */
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
@@ -1708,6 +1711,8 @@ lcs_stopcard(struct lcs_card *card)
if (card->read.state != LCS_CH_STATE_STOPPED &&
card->write.state != LCS_CH_STATE_STOPPED &&
+ card->read.state != LCS_CH_STATE_ERROR &&
+ card->write.state != LCS_CH_STATE_ERROR &&
card->state == DEV_STATE_UP) {
lcs_clear_multicast_list(card);
rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -2145,7 +2150,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev->stop = lcs_stop_device;
card->dev->hard_start_xmit = lcs_start_xmit;
card->dev->get_stats = lcs_getstats;
- SET_MODULE_OWNER(dev);
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 0e1e4a0a88f..8976fb0b070 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -138,6 +138,7 @@ enum lcs_channel_states {
LCS_CH_STATE_RUNNING,
LCS_CH_STATE_SUSPENDED,
LCS_CH_STATE_CLEARED,
+ LCS_CH_STATE_ERROR,
};
/**
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3d28e1a5bf7..4d18d6419dd 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -753,7 +753,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
header.next = 0;
memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
- conn->prof.send_stamp = xtime;
+ conn->prof.send_stamp = current_kernel_time();
txmsg.class = 0;
txmsg.tag = 0;
rc = iucv_message_send(conn->path, &txmsg, 0, 0,
@@ -1185,7 +1185,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
fsm_newstate(conn->fsm, CONN_STATE_TX);
- conn->prof.send_stamp = xtime;
+ conn->prof.send_stamp = current_kernel_time();
msg.tag = 1;
msg.class = 0;
@@ -1904,7 +1904,6 @@ static void netiucv_setup_netdevice(struct net_device *dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
}
/**
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index ec18bae05df..8c6b72d05b1 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -833,8 +833,7 @@ struct qeth_card {
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int use_hard_stop;
- int (*orig_hard_header)(struct sk_buff *,struct net_device *,
- unsigned short,void *,void *,unsigned);
+ const struct header_ops *orig_header_ops;
struct qeth_osn_info osn_info;
atomic_t force_alloc_skb;
};
@@ -1178,9 +1177,9 @@ qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
char *buf)
{
if (proto == QETH_PROT_IPV4)
- return qeth_ipaddr4_to_string(addr, buf);
+ qeth_ipaddr4_to_string(addr, buf);
else if (proto == QETH_PROT_IPV6)
- return qeth_ipaddr6_to_string(addr, buf);
+ qeth_ipaddr6_to_string(addr, buf);
}
static inline int
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 70108fb1690..e3c268cfbff 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -159,13 +159,15 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
buffer = buf->buffer;
/* fill one skb into buffer */
for (i = 0; i < ctx->elements_per_skb; ++i){
- buffer->element[buf->next_element_to_fill].addr =
- ctx->elements[element].addr;
- buffer->element[buf->next_element_to_fill].length =
- ctx->elements[element].length;
- buffer->element[buf->next_element_to_fill].flags =
- ctx->elements[element].flags;
- buf->next_element_to_fill++;
+ if (ctx->elements[element].length != 0) {
+ buffer->element[buf->next_element_to_fill].
+ addr = ctx->elements[element].addr;
+ buffer->element[buf->next_element_to_fill].
+ length = ctx->elements[element].length;
+ buffer->element[buf->next_element_to_fill].
+ flags = ctx->elements[element].flags;
+ buf->next_element_to_fill++;
+ }
element++;
elements--;
}
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 57f69434fbf..a2d08c9ba3c 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -160,6 +160,9 @@ qeth_set_multicast_list(struct net_device *);
static void
qeth_setadp_promisc_mode(struct qeth_card *);
+static int
+qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+
static void
qeth_notify_processes(void)
{
@@ -561,7 +564,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
}
static int
-qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
+qeth_threads_running(struct qeth_card *card, unsigned long threads);
static void
@@ -576,8 +579,7 @@ qeth_remove_device(struct ccwgroup_device *cgdev)
if (!card)
return;
- if (qeth_wait_for_threads(card, 0xffffffff))
- return;
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE){
card->use_hard_stop = 1;
@@ -821,14 +823,15 @@ __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
again:
list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
if (addr->is_multicast) {
+ list_del(&addr->entry);
spin_unlock_irqrestore(&card->ip_lock, *flags);
rc = qeth_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, *flags);
if (!rc) {
- list_del(&addr->entry);
kfree(addr);
goto again;
- }
+ } else
+ list_add(&addr->entry, &card->ip_list);
}
}
}
@@ -1542,16 +1545,21 @@ qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
- "reply\n", CARD_WDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_WDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "negative reply\n", CARD_WDEV_ID(card));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
- "function level mismatch "
- "(sent: 0x%x, received: 0x%x)\n",
- CARD_WDEV_ID(card), card->info.func_level, temp);
+ "function level mismatch "
+ "(sent: 0x%x, received: 0x%x)\n",
+ CARD_WDEV_ID(card), card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@@ -1597,8 +1605,13 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
goto out;
}
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
- "reply\n", CARD_RDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_RDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "negative reply\n", CARD_RDEV_ID(card));
goto out;
}
@@ -1613,8 +1626,8 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- CARD_RDEV_ID(card), card->info.func_level, temp);
+ "level mismatch (sent: 0x%x, received: 0x%x)\n",
+ CARD_RDEV_ID(card), card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -2496,7 +2509,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
struct iphdr *ip_hdr;
QETH_DBF_TEXT(trace,5,"skbfktr");
- skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
+ skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
/* this is a fake ethernet header */
fake_hdr = tr_hdr(skb);
@@ -2689,10 +2702,15 @@ qeth_process_inbound_buffer(struct qeth_card *card,
qeth_layer2_rebuild_skb(card, skb, hdr);
else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
vlan_tag = qeth_rebuild_skb(card, skb, hdr);
- else { /*in case of OSN*/
+ else if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) {
skb_push(skb, sizeof(struct qeth_hdr));
skb_copy_to_linear_data(skb, hdr,
sizeof(struct qeth_hdr));
+ } else { /* unknown header type */
+ dev_kfree_skb_any(skb);
+ QETH_DBF_TEXT(trace, 3, "inbunkno");
+ QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
+ continue;
}
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
@@ -2804,13 +2822,16 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
if (newcount < count) {
/* we are in memory shortage so we switch back to
traditional skb allocation and drop packets */
- if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
- printk(KERN_WARNING
- "qeth: switch to alloc skb\n");
+ if (!atomic_read(&card->force_alloc_skb) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to alloc skb\n");
+ atomic_set(&card->force_alloc_skb, 3);
count = newcount;
} else {
- if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
- printk(KERN_WARNING "qeth: switch to sg\n");
+ if ((atomic_read(&card->force_alloc_skb) == 1) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to sg\n");
+ atomic_add_unless(&card->force_alloc_skb, -1, 0);
}
/*
@@ -3354,10 +3375,12 @@ out_freeoutq:
while (i > 0)
kfree(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
out_nomem:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
@@ -3373,16 +3396,20 @@ qeth_free_qdio_buffers(struct qeth_card *card)
QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
- for (i = 0; i < card->qdio.no_out_queues; ++i){
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
- kfree(card->qdio.out_qs[i]);
+ if (card->qdio.out_qs) {
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ kfree(card->qdio.out_qs[i]);
+ }
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
}
- kfree(card->qdio.out_qs);
}
static void
@@ -3393,7 +3420,7 @@ qeth_clear_qdio_buffers(struct qeth_card *card)
QETH_DBF_TEXT(trace, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
- if (card->qdio.out_qs[i]){
+ if (card->qdio.out_qs && card->qdio.out_qs[i]) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
qeth_clear_output_buffer(card->qdio.out_qs[i],
&card->qdio.out_qs[i]->bufs[j]);
@@ -3769,8 +3796,8 @@ qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
/*hard_header fake function; used in case fake_ll is set */
static int
qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, void *daddr, void *saddr,
- unsigned len)
+ unsigned short type, const void *daddr, const void *saddr,
+ unsigned len)
{
if(dev->type == ARPHRD_IEEE802_TR){
struct trh_hdr *hdr;
@@ -3793,6 +3820,11 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
}
}
+static const struct header_ops qeth_fake_ops = {
+ .create = qeth_fake_header,
+ .parse = qeth_hard_header_parse,
+};
+
static int
qeth_send_packet(struct qeth_card *, struct sk_buff *);
@@ -4482,7 +4514,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
/* check if we have enough elements (including following
* free buffers) to handle eddp context */
if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
- printk("eddp tx_dropped 1\n");
+ if (net_ratelimit())
+ PRINT_WARN("eddp tx_dropped 1\n");
rc = -EBUSY;
goto out;
}
@@ -4553,6 +4586,53 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
return elements_needed;
}
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+ int tlen;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ }
+ }
+}
static int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
@@ -4584,7 +4664,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
if (!card->options.layer2) {
ipv = qeth_get_ip_version(skb);
- if ((card->dev->hard_header == qeth_fake_header) && ipv) {
+ if ((card->dev->header_ops == &qeth_fake_ops) && ipv) {
new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM;
@@ -4638,12 +4718,22 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
elements_needed += elems;
}
+ if ((large_send == QETH_LARGE_SEND_NO) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ qeth_tx_csum(new_skb);
+
if (card->info.type != QETH_CARD_TYPE_IQD)
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed, ctx);
- else
+ else {
+ if ((!card->options.layer2) &&
+ (ipv == 0)) {
+ __qeth_free_new_skb(skb, new_skb);
+ return -EPERM;
+ }
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, ctx);
+ }
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
@@ -6385,20 +6475,18 @@ qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
static u32
qeth_ethtool_get_tx_csum(struct net_device *dev)
{
- /* We may need to say that we support tx csum offload if
- * we do EDDP or TSO. There are discussions going on to
- * enforce rules in the stack and in ethtool that make
- * SG and TSO depend on HW_CSUM. At the moment there are
- * no such rules....
- * If we say yes here, we have to checksum outbound packets
- * any time. */
- return 0;
+ return (dev->features & NETIF_F_HW_CSUM) != 0;
}
static int
qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
- return -EINVAL;
+ if (data)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
}
static u32
@@ -6488,12 +6576,16 @@ static struct ethtool_ops qeth_ethtool_ops = {
};
static int
-qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
+qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
- struct qeth_card *card;
- struct ethhdr *eth;
+ const struct qeth_card *card;
+ const struct ethhdr *eth;
+ struct net_device *dev = skb->dev;
- card = qeth_get_card_from_dev(skb->dev);
+ if (dev->type != ARPHRD_IEEE802_TR)
+ return 0;
+
+ card = qeth_get_card_from_dev(dev);
if (card->options.layer2)
goto haveheader;
#ifdef CONFIG_QETH_IPV6
@@ -6523,6 +6615,10 @@ haveheader:
return ETH_ALEN;
}
+static const struct header_ops qeth_null_ops = {
+ .parse = qeth_hard_header_parse,
+};
+
static int
qeth_netdev_init(struct net_device *dev)
{
@@ -6547,12 +6643,8 @@ qeth_netdev_init(struct net_device *dev)
dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
#endif
- if (qeth_get_netdev_flags(card) & IFF_NOARP) {
- dev->rebuild_header = NULL;
- dev->hard_header = NULL;
- dev->header_cache_update = NULL;
- dev->hard_header_cache = NULL;
- }
+ dev->header_ops = &qeth_null_ops;
+
#ifdef CONFIG_QETH_IPV6
/*IPv6 address autoconfiguration stuff*/
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
@@ -6560,11 +6652,8 @@ qeth_netdev_init(struct net_device *dev)
#endif
if (card->options.fake_ll &&
(qeth_get_netdev_flags(card) & IFF_NOARP))
- dev->hard_header = qeth_fake_header;
- if (dev->type == ARPHRD_IEEE802_TR)
- dev->hard_header_parse = NULL;
- else
- dev->hard_header_parse = qeth_hard_header_parse;
+ dev->header_ops = &qeth_fake_ops;
+
dev->set_mac_address = qeth_layer2_set_mac_address;
dev->flags |= qeth_get_netdev_flags(card);
if ((card->options.fake_broadcast) ||
@@ -6576,7 +6665,6 @@ qeth_netdev_init(struct net_device *dev)
dev->mtu = card->info.initial_mtu;
if (card->info.type != QETH_CARD_TYPE_OSN)
SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
- SET_MODULE_OWNER(dev);
return 0;
}
@@ -6668,10 +6756,10 @@ retry:
}
/*network device will be recovered*/
if (card->dev) {
- card->dev->hard_header = card->orig_hard_header;
+ card->dev->header_ops = card->orig_header_ops;
if (card->options.fake_ll &&
(qeth_get_netdev_flags(card) & IFF_NOARP))
- card->dev->hard_header = qeth_fake_header;
+ card->dev->header_ops = &qeth_fake_ops;
return 0;
}
/* at first set_online allocate netdev */
@@ -6685,7 +6773,7 @@ retry:
goto out;
}
card->dev->priv = card;
- card->orig_hard_header = card->dev->hard_header;
+ card->orig_header_ops = card->dev->header_ops;
card->dev->type = qeth_get_arphdr_type(card->info.type,
card->info.link_type);
card->dev->init = qeth_netdev_init;
@@ -7412,7 +7500,8 @@ qeth_start_ipa_tso(struct qeth_card *card)
}
if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
card->options.large_send = QETH_LARGE_SEND_NO;
- card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
}
return rc;
}
@@ -7552,22 +7641,26 @@ qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
card->options.large_send = type;
switch (card->options.large_send) {
case QETH_LARGE_SEND_EDDP:
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
break;
case QETH_LARGE_SEND_TSO:
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
} else {
PRINT_WARN("TSO not supported on %s. "
"large_send set to 'no'.\n",
card->dev->name);
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
break;
default: /* includes QETH_LARGE_SEND_NO */
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
break;
}
if (card->state == CARD_STATE_UP)
@@ -8231,7 +8324,7 @@ qeth_arp_constructor(struct neighbour *neigh)
if (card == NULL)
goto out;
if((card->options.layer2) ||
- (card->dev->hard_header == qeth_fake_header))
+ (card->dev->header_ops == &qeth_fake_ops))
goto out;
rcu_read_lock();
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
index 1d8083c9176..6de2da5ed5f 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_mpc.h
@@ -565,6 +565,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20)
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12)
+#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer+0x0b)) + \
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index bb0287ad1aa..2cc3f3a0e39 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1760,10 +1760,10 @@ qeth_remove_device_attributes(struct device *dev)
{
struct qeth_card *card = dev->driver_data;
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return sysfs_remove_group(&dev->kobj,
- &qeth_osn_device_attr_group);
-
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
+ return;
+ }
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index a1db9592513..90aa53fc4f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -259,21 +259,21 @@ zfcp_module_init(void)
size = sizeof(struct zfcp_fsf_req_qtcb);
align = calc_alignment(size);
zfcp_data.fsf_req_qtcb_cache =
- kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
if (!zfcp_data.fsf_req_qtcb_cache)
goto out;
size = sizeof(struct fsf_status_read_buffer);
align = calc_alignment(size);
zfcp_data.sr_buffer_cache =
- kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_sr", size, align, 0, NULL);
if (!zfcp_data.sr_buffer_cache)
goto out_sr_cache;
size = sizeof(struct zfcp_gid_pn_data);
align = calc_alignment(size);
zfcp_data.gid_pn_cache =
- kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_gid", size, align, 0, NULL);
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
@@ -1503,7 +1503,7 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
data->ct.pool = pool;
}
} else {
- data = kmalloc(sizeof(struct zfcp_gid_pn_data), GFP_ATOMIC);
+ data = kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC);
}
if (NULL == data)
@@ -1526,15 +1526,12 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
* zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
* @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
*/
-static void
-zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
+static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
{
- if ((gid_pn->ct.pool != 0))
+ if (gid_pn->ct.pool)
mempool_free(gid_pn, gid_pn->ct.pool);
else
- kfree(gid_pn);
-
- return;
+ kmem_cache_free(zfcp_data.gid_pn_cache, gid_pn);
}
/**
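
Two related changes appear in the zfcp_aux.c hunks above: kmem_cache_create()
is called with the newer five-argument signature (the destructor argument is
gone), and GID_PN buffers that are not drawn from a mempool are now allocated
from, and freed back to, the gid_pn slab cache instead of kmalloc()/kfree().
A hedged sketch of that alloc/free pairing, with illustrative names and sizes:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;	/* illustrative cache, not zfcp's */

static int example_cache_init(void)
{
	/* five-argument form: name, object size, alignment, flags, constructor */
	example_cache = kmem_cache_create("example_cache", 128, 8, 0, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void *example_obj_get(void)
{
	/* objects taken from a slab cache ... */
	return kmem_cache_alloc(example_cache, GFP_ATOMIC);
}

static void example_obj_put(void *obj)
{
	/* ... must be returned to the same cache, not handed to kfree() */
	kmem_cache_free(example_cache, obj);
}
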
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 1c8f71a5985..c0d1c0eb320 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -28,7 +28,7 @@ static void zfcp_ccw_remove(struct ccw_device *);
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct device *);
+static void zfcp_ccw_shutdown(struct ccw_device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
@@ -51,9 +51,7 @@ static struct ccw_driver zfcp_ccw_driver = {
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
- .driver = {
- .shutdown = zfcp_ccw_shutdown,
- },
+ .shutdown = zfcp_ccw_shutdown,
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
@@ -277,12 +275,12 @@ zfcp_ccw_register(void)
* Makes sure that QDIO queues are down when the system gets stopped.
*/
static void
-zfcp_ccw_shutdown(struct device *dev)
+zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(dev);
+ adapter = dev_get_drvdata(&cdev->dev);
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
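
The zfcp_ccw.c hunks above move the shutdown hook from the embedded
device_driver to a .shutdown member of struct ccw_driver, so the callback
receives the struct ccw_device directly and reaches its driver data via
&cdev->dev. A minimal sketch of a driver wired up that way (names are
illustrative; the field layout is assumed from the hunk):

#include <linux/device.h>
#include <asm/ccwdev.h>

static void example_shutdown(struct ccw_device *cdev)
{
	/* driver data previously stored with dev_set_drvdata(&cdev->dev, ...) */
	void *priv = dev_get_drvdata(&cdev->dev);

	/* quiesce the device here before the system stops */
	(void) priv;
}

static struct ccw_driver example_ccw_driver = {
	.name     = "example",
	.shutdown = example_shutdown,
};
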
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5f3212440f6..ffa3bf75694 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -19,8 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <asm/debug.h>
#include <linux/ctype.h>
+#include <asm/debug.h>
#include "zfcp_ext.h"
static u32 dbfsize = 4;
@@ -35,17 +35,17 @@ static int
zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
{
unsigned long long sec;
- struct timespec xtime;
+ struct timespec dbftime;
int len = 0;
stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
sec = stck >> 12;
do_div(sec, 1000000);
- xtime.tv_sec = sec;
+ dbftime.tv_sec = sec;
stck -= (sec * 1000000) << 12;
- xtime.tv_nsec = ((stck * 1000) >> 12);
+ dbftime.tv_nsec = ((stck * 1000) >> 12);
len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
- label, xtime.tv_sec, xtime.tv_nsec);
+ label, dbftime.tv_sec, dbftime.tv_nsec);
return len;
}
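
For reference, the arithmetic in zfcp_dbf_stck() above relies on TOD-clock
bit 51 ticking once per microsecond: after the epoch adjustment, shifting
right by 12 yields microseconds, dividing by 1,000,000 splits off whole
seconds, and the sub-second remainder is scaled to nanoseconds. A rough
user-space rendition of the same math (sketch only; the epoch constant is
copied from the hunk):

/* Sketch: convert an s390 STCK value to seconds and nanoseconds. */
static void stck_to_time(unsigned long long stck,
			 unsigned long *sec_out, unsigned long *nsec_out)
{
	unsigned long long sec;

	stck -= 0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096);
	sec = stck >> 12;			/* TOD bit 51 == 1 microsecond */
	sec /= 1000000;				/* microseconds -> whole seconds */
	stck -= (sec * 1000000) << 12;		/* sub-second part, in TOD units */

	*sec_out = (unsigned long) sec;
	*nsec_out = (unsigned long) ((stck * 1000) >> 12);
}
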
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 22649639230..b36dfc40d9f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -126,6 +126,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
/********************* FSF SPECIFIC DEFINES *********************************/
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4e7cb6dc4d3..16b4418ab25 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -54,7 +54,7 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
struct zfcp_port *,
struct zfcp_unit *, int);
-static inline int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
+static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
struct zfcp_port *,
struct zfcp_unit *, int);
@@ -106,8 +106,8 @@ static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
static void zfcp_erp_action_ready(struct zfcp_erp_action *);
static int zfcp_erp_action_exists(struct zfcp_erp_action *);
-static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
-static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
+static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
static void zfcp_erp_memwait_handler(unsigned long);
@@ -952,7 +952,7 @@ zfcp_erp_memwait_handler(unsigned long data)
* action gets an appropriate flag and will be processed
* accordingly
*/
-void zfcp_erp_timeout_handler(unsigned long data)
+static void zfcp_erp_timeout_handler(unsigned long data)
{
struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1491,7 +1491,7 @@ zfcp_erp_strategy_statechange(int action,
return retval;
}
-static inline int
+static int
zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
{
return
@@ -1626,7 +1626,7 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
{
struct zfcp_erp_add_work *p;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
"the FCP-LUN 0x%Lx connected to "
@@ -1639,7 +1639,6 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
}
zfcp_unit_get(unit);
- memset(p, 0, sizeof(*p));
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
@@ -2002,7 +2001,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
* returns: 0 - successful setup
* !0 - failed setup
*/
-int
+static int
zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
{
int retval;
@@ -3249,8 +3248,7 @@ static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
zfcp_erp_action_dismiss(&unit->erp_action);
}
-static inline void
-zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -3259,8 +3257,7 @@ zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
}
-static inline void
-zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
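
Among the smaller cleanups above, the kmalloc()+memset() pair in
zfcp_erp_schedule_work() becomes a single kzalloc(), which hands back memory
that is already zeroed. A minimal illustration with a hypothetical structure:

#include <linux/slab.h>

struct example_work { int pending; void *unit; };	/* hypothetical payload */

static struct example_work *example_work_alloc(void)
{
	/* kzalloc(size, flags) == kmalloc(size, flags) + memset(p, 0, size) */
	return kzalloc(sizeof(struct example_work), GFP_KERNEL);
}
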
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0eb31e162b1..99299976e89 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1930,7 +1930,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
skip_fsfstatus:
send_els->status = retval;
- if (send_els->handler != 0)
+ if (send_els->handler)
send_els->handler(send_els->handler_data);
return retval;
@@ -4154,8 +4154,9 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
fcp_rsp_iu->fcp_resid,
(int) zfcp_get_fcp_dl(fcp_cmnd_iu));
- scpnt->resid = fcp_rsp_iu->fcp_resid;
- if (scpnt->request_bufflen - scpnt->resid < scpnt->underflow)
+ scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
+ if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
+ scpnt->underflow)
set_host_byte(&scpnt->result, DID_ERROR);
}
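
The zfcp_fsf.c hunk above stops touching scpnt->resid and
scpnt->request_bufflen directly and uses the scsi_set_resid(),
scsi_get_resid() and scsi_bufflen() accessors instead. A hedged sketch of the
underflow check written in terms of those accessors (it mirrors the hunk but
is not a verbatim copy of the driver; the host-byte update is spelled out by
hand here):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: record the residual byte count and flag an underflow. */
static void example_check_underflow(struct scsi_cmnd *scpnt, unsigned int resid)
{
	scsi_set_resid(scpnt, resid);
	if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < scpnt->underflow)
		scpnt->result |= DID_ERROR << 16;	/* set the host byte */
}
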
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index bdf5782b8a7..c6899efdc8f 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -36,8 +36,6 @@ static void zfcp_qdio_sbale_fill
(struct zfcp_fsf_req *, unsigned long, void *, int);
static int zfcp_qdio_sbals_from_segment
(struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
-static int zfcp_qdio_sbals_from_buffer
- (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
static qdio_handler_t zfcp_qdio_request_handler;
static qdio_handler_t zfcp_qdio_response_handler;
@@ -47,103 +45,56 @@ static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
/*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct.
- * Cur_buf is the pointer array and count can be any number of required
- * buffers, the page-fitting arithmetic is done entirely within this funciton.
+ * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
+ * in the adapter struct; sbuf is the pointer array.
*
- * returns: number of buffers allocated
* locks: must only be called with zfcp_data.config_sema taken
*/
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
+static void
+zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
{
- int buf_pos;
- int qdio_buffers_per_page;
- int page_pos = 0;
- struct qdio_buffer *first_in_page = NULL;
-
- qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
- ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
-
- for (buf_pos = 0; buf_pos < count; buf_pos++) {
- if (page_pos == 0) {
- cur_buf[buf_pos] = (struct qdio_buffer *)
- get_zeroed_page(GFP_KERNEL);
- if (cur_buf[buf_pos] == NULL) {
- ZFCP_LOG_INFO("error: allocation of "
- "QDIO buffer failed \n");
- goto out;
- }
- first_in_page = cur_buf[buf_pos];
- } else {
- cur_buf[buf_pos] = first_in_page + page_pos;
+ int pos;
- }
- /* was initialised to zero */
- page_pos++;
- page_pos %= qdio_buffers_per_page;
- }
- out:
- return buf_pos;
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
+ free_page((unsigned long) sbuf[pos]);
}
/*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
- * in the adapter struct cur_buf is the pointer array and count can be any
- * number of buffers in the array that should be freed starting from buffer 0
+ * Allocates BUFFER memory to each of the pointers of the struct qdio_buffer
+ * array in the adapter struct; sbuf is the pointer array.
*
+ * returns: zero on success else -ENOMEM
* locks: must only be called with zfcp_data.config_sema taken
*/
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
+static int
+zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
{
- int buf_pos;
- int qdio_buffers_per_page;
-
- qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
- ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
+ int pos;
- for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
- free_page((unsigned long) cur_buf[buf_pos]);
- return;
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
+ sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+ if (!sbuf[pos]) {
+ zfcp_qdio_buffers_dequeue(sbuf);
+ return -ENOMEM;
+ }
+ }
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
+ if (pos % QBUFF_PER_PAGE)
+ sbuf[pos] = sbuf[pos - 1] + 1;
+ return 0;
}
/* locks: must only be called with zfcp_data.config_sema taken */
int
zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
{
- int buffer_count;
- int retval = 0;
-
- buffer_count =
- zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
- ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
- "queue\n", buffer_count);
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- buffer_count);
- retval = -ENOMEM;
- goto out;
- }
+ int ret;
- buffer_count =
- zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
- ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
- "queue", buffer_count);
- zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
- buffer_count);
- ZFCP_LOG_TRACE("freeing request_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- retval = -ENOMEM;
- goto out;
- }
- out:
- return retval;
+ ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
+ if (ret)
+ return ret;
+ return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
}
/* locks: must only be called with zfcp_data.config_sema taken */
@@ -151,12 +102,10 @@ void
zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
{
ZFCP_LOG_TRACE("freeing request_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
+ zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
ZFCP_LOG_TRACE("freeing response_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
+ zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
}
int
@@ -681,28 +630,6 @@ out:
/**
- * zfcp_qdio_sbals_from_buffer - fill SBALs from buffer
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @buffer: data buffer
- * @length: length of buffer
- * @max_sbals: upper bound for number of SBALs to be used
- */
-static int
-zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
- void *buffer, unsigned long length, int max_sbals)
-{
- struct scatterlist sg_segment;
-
- zfcp_address_to_sg(buffer, &sg_segment);
- sg_segment.length = length;
-
- return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
- max_sbals);
-}
-
-
-/**
* zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
* @fsf_req: request to be processed
* @sbtype: SBALE flags
@@ -713,18 +640,9 @@ int
zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
{
- if (scsi_cmnd->use_sg) {
- return zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
- (struct scatterlist *)
- scsi_cmnd->request_buffer,
- scsi_cmnd->use_sg,
- ZFCP_MAX_SBALS_PER_REQ);
- } else {
- return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
- scsi_cmnd->request_buffer,
- scsi_cmnd->request_bufflen,
- ZFCP_MAX_SBALS_PER_REQ);
- }
+ return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd),
+ ZFCP_MAX_SBALS_PER_REQ);
}
/**
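
The rework of zfcp_qdio_buffers_enqueue() and zfcp_qdio_buffers_dequeue()
earlier in this file keeps the same page-tiling idea as before: QBUFF_PER_PAGE
buffers share one zeroed page, so only every QBUFF_PER_PAGE-th slot owns a
page and the slots in between point into it. A simplified sketch of that
layout, with hypothetical constants standing in for the QDIO definitions:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define EXAMPLE_BUF_COUNT	128				/* stand-in for QDIO_MAX_BUFFERS_PER_Q */
#define EXAMPLE_BUF_SIZE	256				/* stand-in for sizeof(struct qdio_buffer) */
#define EXAMPLE_PER_PAGE	(PAGE_SIZE / EXAMPLE_BUF_SIZE)

/* Assumes the slot array was zero-initialized by the caller, as the adapter
 * struct is in zfcp; free_page(0) is then a harmless no-op. */
static void example_tile_free(void **slot)
{
	int pos;

	/* only the first slot of each page owns the allocation */
	for (pos = 0; pos < EXAMPLE_BUF_COUNT; pos += EXAMPLE_PER_PAGE)
		free_page((unsigned long) slot[pos]);
}

static int example_tile_alloc(void **slot)
{
	int pos;

	for (pos = 0; pos < EXAMPLE_BUF_COUNT; pos += EXAMPLE_PER_PAGE) {
		slot[pos] = (void *) get_zeroed_page(GFP_KERNEL);
		if (!slot[pos]) {
			example_tile_free(slot);
			return -ENOMEM;
		}
	}
	/* the remaining slots point into the page owned by the previous slot */
	for (pos = 0; pos < EXAMPLE_BUF_COUNT; pos++)
		if (pos % EXAMPLE_PER_PAGE)
			slot[pos] = (char *) slot[pos - 1] + EXAMPLE_BUF_SIZE;
	return 0;
}
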
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0acf6db0a08..ad7eb4a9261 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -764,7 +764,9 @@ zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
return;
ret = zfcp_fsf_exchange_port_data(NULL, adapter, data);
- if (ret == 0) {
+ if (ret) {
+ kfree(data);
+ } else {
adapter->stats_reset = jiffies/HZ;
old_data = adapter->stats_reset_data;
adapter->stats_reset_data = data; /* finally freed in