| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-14 13:25:01 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-14 13:25:01 -0700 |
| commit    | b7f80afa28866c257876c272d6c013e0dbed3c31 (patch) |
| tree      | 4e72598307cda046a2e0db5e7c7cb1d8a15574ae /drivers/s390 |
| parent    | 42c59208219a2d43f0dde94bebc68c20b95b13ce (diff) |
| parent    | 5e34599fc8ba1e8889095bd56a71fd9802ed5a51 (diff) |
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (71 commits)
[S390] sclp_tty: Fix scheduling while atomic bug.
[S390] sclp_tty: remove ioctl interface.
[S390] Remove P390 support.
[S390] Cleanup vmcp printk messages.
[S390] Cleanup lcs printk messages.
[S390] Cleanup kprobes printk messages.
[S390] Cleanup vmwatch printk messages.
[S390] Cleanup dcssblk printk messages.
[S390] Cleanup zfcp dumper printk messages.
[S390] Cleanup vmlogrdr printk messages.
[S390] Cleanup s390 debug feature print messages.
[S390] Cleanup monreader printk messages.
[S390] Cleanup appldata printk messages.
[S390] Cleanup smsgiucv printk messages.
[S390] Cleanup cpacf printk messages.
[S390] Cleanup qeth print messages.
[S390] Cleanup netiucv printk messages.
[S390] Cleanup iucv printk messages.
[S390] Cleanup sclp printk messages.
[S390] Cleanup zcrypt printk messages.
...
Diffstat (limited to 'drivers/s390')
82 files changed, 4573 insertions, 2046 deletions
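Most of the mechanical churn in the hunks below comes from the interrupt response block rework: command-mode subchannel status word (SCSW) fields are now reached through an `irb->scsw.cmd.*` sub-structure instead of directly through `irb->scsw.*`. The sketch below is a minimal, self-contained user-space model of that union-based layout — the struct and field definitions here are simplified stand-ins, not the real s390 headers — meant only to show why every `irb->scsw.cstat` access in the diff becomes `irb->scsw.cmd.cstat`:

```c
#include <stdio.h>

/* Simplified, hypothetical model of the reworked SCSW layout; the real
 * s390 definitions use many more bitfields.  The command-mode and
 * transport-mode views share the same storage through a union, so a
 * caller must pick the view explicitly. */
struct cmd_scsw {
	unsigned int fctl;	/* function control */
	unsigned int cc;	/* deferred condition code */
	unsigned int cstat;	/* subchannel status */
	unsigned int dstat;	/* device status */
	unsigned int cpa;	/* channel program address */
	unsigned int count;	/* residual byte count */
};

struct tm_scsw {
	unsigned int fctl;
	unsigned int cc;
	/* transport-mode specific fields would follow here */
};

union scsw {
	struct cmd_scsw cmd;
	struct tm_scsw tm;
};

struct irb_model {
	union scsw scsw;
};

int main(void)
{
	struct irb_model irb = { .scsw.cmd = { .cstat = 0x00, .dstat = 0x0c } };

	/* Old driver code read irb.scsw.cstat directly; with the union in
	 * place the command-mode view is selected via the .cmd member,
	 * mirroring the irb->scsw.cmd.cstat accesses in the hunks below. */
	printf("CS/DS %04x\n", (irb.scsw.cmd.cstat << 8) | irb.scsw.cmd.dstat);
	return 0;
}
```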
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1a402568336..1b6c52ef733 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, now = get_clock(); DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", - cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), - (unsigned int) intparm); + cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) | + irb->scsw.cmd.dstat), (unsigned int) intparm); /* check for unsolicited interrupts */ cqr = (struct dasd_ccw_req *) intparm; - if (!cqr || ((irb->scsw.cc == 1) && - (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && - (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { + if (!cqr || ((irb->scsw.cmd.cc == 1) && + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && + (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) { if (cqr && cqr->status == DASD_CQR_IN_IO) cqr->status = DASD_CQR_QUEUED; device = dasd_device_from_cdev_locked(cdev); @@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, /* Check for clear pending */ if (cqr->status == DASD_CQR_CLEAR_PENDING && - irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { + irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { cqr->status = DASD_CQR_CLEARED; dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); @@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", - ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); + ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr); next = NULL; expires = 0; - if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && - irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { + if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && + irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) { /* request was completed successfully */ cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index e6700df52df..5c6e6f331cb 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) /* determine the address of the CCW to be restarted */ /* Imprecise ending is not set -> addr from IRB-SCSW */ - cpa = default_erp->refers->irb.scsw.cpa; + cpa = default_erp->refers->irb.scsw.cmd.cpa; if (cpa == 0) { @@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) /* determine the address of the CCW to be restarted */ /* Imprecise ending is not set -> addr from IRB-SCSW */ - cpa = previous_erp->irb.scsw.cpa; + cpa = previous_erp->irb.scsw.cmd.cpa; if (cpa == 0) { @@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp) { struct dasd_device *device = erp->startdev; - if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK + if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_CTRL_CHK)) { DEV_MESSAGE(KERN_DEBUG, device, "%s", "channel or interface control check"); @@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) if ((cqr1->irb.esw.esw0.erw.cons == 0) && (cqr2->irb.esw.esw0.erw.cons == 0)) { - if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | + if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_CTRL_CHK)) == - (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | + 
(cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_CTRL_CHK))) return 1; /* match with ifcc*/ } @@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) } /* double-check if current erp/cqr was successfull */ - if ((cqr->irb.scsw.cstat == 0x00) && - (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { + if ((cqr->irb.scsw.cmd.cstat == 0x00) && + (cqr->irb.scsw.cmd.dstat == + (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { DEV_MESSAGE(KERN_DEBUG, device, "ERP called for successful request %p" diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a0edae091b5..e0b77210d37 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; - if ((irb->scsw.dstat & mask) == mask) { + if ((irb->scsw.cmd.dstat & mask) == mask) { dasd_generic_handle_state_change(device); return; } /* summary unit check */ - if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { + if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && + (irb->ecw[7] == 0x0D)) { dasd_alias_handle_summary_unit_check(device, irb); return; } @@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, device->cdev->dev.bus_id); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X\n", req, - irb->scsw.cstat, irb->scsw.dstat); + irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", device->cdev->dev.bus_id, - (void *) (addr_t) irb->scsw.cpa); + (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER @@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, /* scsw->cda is either valid or zero */ len = 0; from = ++to; - fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ + fail = (struct ccw1 *)(addr_t) + irb->scsw.cmd.cpa; /* failing CCW */ if (from < fail - 2) { from = fail - 2; /* there is a gap - print header */ len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 116611583df..aee4656127f 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; - if ((irb->scsw.dstat & mask) == mask) { + if ((irb->scsw.cmd.dstat & mask) == mask) { dasd_generic_handle_state_change(device); return; } @@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, device->cdev->dev.bus_id); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X\n", req, - irb->scsw.cstat, irb->scsw.dstat); + irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", device->cdev->dev.bus_id, - (void *) (addr_t) irb->scsw.cpa); + (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER @@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct 
dasd_ccw_req * req, /* print failing CCW area */ len = 0; - if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { - act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; + if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { + act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } - end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); + end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); while (act <= end) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index bb52d2fbac1..01fcdd91b84 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch struct dcssblk_dev_info *dev_info; int rc; - if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { - PRINT_WARN("Invalid value, must be 0 or 1\n"); + if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; - } down_write(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); if (atomic_read(&dev_info->use_count)) { @@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch set_disk_ro(dev_info->gd, 0); } } else { - PRINT_WARN("Invalid value, must be 0 or 1\n"); rc = -EINVAL; goto out; } @@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char { struct dcssblk_dev_info *dev_info; - if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { - PRINT_WARN("Invalid value, must be 0 or 1\n"); + if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; - } dev_info = container_of(dev, struct dcssblk_dev_info, dev); down_write(&dcssblk_devices_sem); @@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char } } else { up_write(&dcssblk_devices_sem); - PRINT_WARN("Invalid value, must be 0 or 1\n"); return -EINVAL; } up_write(&dcssblk_devices_sem); @@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char goto out; unregister_dev: - PRINT_ERR("device_create_file() failed!\n"); list_del(&dev_info->lh); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; @@ -702,10 +695,8 @@ dcssblk_check_params(void) static void __exit dcssblk_exit(void) { - PRINT_DEBUG("DCSSBLOCK EXIT...\n"); s390_root_dev_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); - PRINT_DEBUG("...finished!\n"); } static int __init @@ -713,27 +704,21 @@ dcssblk_init(void) { int rc; - PRINT_DEBUG("DCSSBLOCK INIT...\n"); dcssblk_root_dev = s390_root_dev_register("dcssblk"); - if (IS_ERR(dcssblk_root_dev)) { - PRINT_ERR("device_register() failed!\n"); + if (IS_ERR(dcssblk_root_dev)) return PTR_ERR(dcssblk_root_dev); - } rc = device_create_file(dcssblk_root_dev, &dev_attr_add); if (rc) { - PRINT_ERR("device_create_file(add) failed!\n"); s390_root_dev_unregister(dcssblk_root_dev); return rc; } rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); if (rc) { - PRINT_ERR("device_create_file(remove) failed!\n"); s390_root_dev_unregister(dcssblk_root_dev); return rc; } rc = register_blkdev(0, DCSSBLK_NAME); if (rc < 0) { - PRINT_ERR("Can't get dynamic major!\n"); s390_root_dev_unregister(dcssblk_root_dev); return rc; } @@ -742,7 +727,6 @@ dcssblk_init(void) dcssblk_check_params(); - 
PRINT_DEBUG("...finished!\n"); return 0; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index f231bc21b1c..dd9b986389a 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); if (cc == 3) return -ENXIO; - if (cc == 2) { - PRINT_ERR("expanded storage lost!\n"); + if (cc == 2) return -ENXIO; - } - if (cc == 1) { - PRINT_ERR("page in failed for page index %u.\n", - xpage_index); + if (cc == 1) return -EIO; - } return 0; } @@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); if (cc == 3) return -ENXIO; - if (cc == 2) { - PRINT_ERR("expanded storage lost!\n"); + if (cc == 2) return -ENXIO; - } - if (cc == 1) { - PRINT_ERR("page out failed for page index %u.\n", - xpage_index); + if (cc == 1) return -EIO; - } return 0; } diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 3e5653c92f4..d3ec9b55ab3 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -93,9 +93,6 @@ struct raw3215_info { struct raw3215_req *queued_write;/* pointer to queued write requests */ wait_queue_head_t empty_wait; /* wait queue for flushing */ struct timer_list timer; /* timer for delayed output */ - char *message; /* pending message from raw3215_irq */ - int msg_dstat; /* dstat for pending message */ - int msg_cstat; /* cstat for pending message */ int line_pos; /* position on the line (for tabs) */ char ubuffer[80]; /* copy_from_user buffer */ }; @@ -359,11 +356,6 @@ raw3215_tasklet(void *data) raw3215_mk_write_req(raw); raw3215_try_io(raw); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); - /* Check for pending message from raw3215_irq */ - if (raw->message != NULL) { - printk(raw->message, raw->msg_dstat, raw->msg_cstat); - raw->message = NULL; - } tty = raw->tty; if (tty != NULL && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { @@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) struct raw3215_req *req; struct tty_struct *tty; int cstat, dstat; - int count, slen; + int count; raw = cdev->dev.driver_data; req = (struct raw3215_req *) intparm; - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; - if (cstat != 0) { - raw->message = KERN_WARNING - "Got nonzero channel status in raw3215_irq " - "(dev sts 0x%2x, sch sts 0x%2x)"; - raw->msg_dstat = dstat; - raw->msg_cstat = cstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + if (cstat != 0) tasklet_schedule(&raw->tasklet); - } if (dstat & 0x01) { /* we got a unit exception */ dstat &= ~0x01; /* we can ignore it */ } @@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) break; /* Attention interrupt, someone hit the enter key */ raw3215_mk_read_req(raw); - if (MACHINE_IS_P390) - memset(raw->inbuf, 0, RAW3215_INBUF_SIZE); tasklet_schedule(&raw->tasklet); break; case 0x08: @@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) return; /* That shouldn't happen ... 
*/ if (req->type == RAW3215_READ) { /* store residual count, then wait for device end */ - req->residual = irb->scsw.count; + req->residual = irb->scsw.cmd.count; } if (dstat == 0x08) break; @@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) tty = raw->tty; count = 160 - req->residual; - if (MACHINE_IS_P390) { - slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE); - if (count > slen) - count = slen; - } else EBCASC(raw->inbuf, count); cchar = ctrlchar_handle(raw->inbuf, count, tty); switch (cchar & CTRLCHAR_MASK) { @@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) raw->flags &= ~RAW3215_WORKING; raw3215_free_req(req); } - raw->message = KERN_WARNING - "Spurious interrupt in in raw3215_irq " - "(dev sts 0x%2x, sch sts 0x%2x)"; - raw->msg_dstat = dstat; - raw->msg_cstat = cstat; tasklet_schedule(&raw->tasklet); } return; @@ -883,7 +857,6 @@ con3215_init(void) free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); raw3215[0] = NULL; - printk("Couldn't find a 3215 console device\n"); return -ENODEV; } register_console(&con3215); @@ -1157,7 +1130,6 @@ tty3215_init(void) tty_set_operations(driver, &tty3215_ops); ret = tty_register_driver(driver); if (ret) { - printk("Couldn't register tty3215 driver\n"); put_tty_driver(driver); return ret; } diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 0b040557db0..3c07974886e 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -411,15 +411,15 @@ static int con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) { /* Handle ATTN. Schedule tasklet to read aid. */ - if (irb->scsw.dstat & DEV_STAT_ATTENTION) + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) con3270_issue_read(cp); if (rq) { - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) rq->rc = -EIO; else /* Normal end. Copy residual count. */ - rq->rescnt = irb->scsw.count; + rq->rescnt = irb->scsw.cmd.count; } return RAW3270_IO_DONE; } diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index ef36f2132aa..e136d10a0de 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -216,17 +216,17 @@ static int fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) { /* Handle ATTN. Set indication and wake waiters for attention. */ - if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { fp->attention = 1; wake_up(&fp->wait); } if (rq) { - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) rq->rc = -EIO; else /* Normal end. Copy residual count. */ - rq->rescnt = irb->scsw.count; + rq->rescnt = irb->scsw.cmd.count; } return RAW3270_IO_DONE; } @@ -512,11 +512,8 @@ fs3270_init(void) int rc; rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); - if (rc) { - printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n", - IBM_FS3270_MAJOR, rc); + if (rc) return rc; - } return 0; } diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 1e1f50655bb..f0e4c96afbf 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -3,9 +3,8 @@ * * Character device driver for reading z/VM *MONITOR service records. * - * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. - * - * Author: Gerald Schaefer <geraldsc@de.ibm.com> + * Copyright IBM Corp. 
2004, 2008 + * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ #include <linux/module.h> @@ -18,12 +17,11 @@ #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/poll.h> +#include <net/iucv/iucv.h> #include <asm/uaccess.h> #include <asm/ebcdic.h> #include <asm/extmem.h> -#include <linux/poll.h> -#include <net/iucv/iucv.h> - //#define MON_DEBUG /* Debug messages on/off */ @@ -152,10 +150,7 @@ static int mon_check_mca(struct mon_msg *monmsg) (mon_mca_end(monmsg) > mon_dcss_end) || (mon_mca_start(monmsg) < mon_dcss_start) || ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) - { - P_DEBUG("READ, IGNORED INVALID MCA\n\n"); return -EINVAL; - } return 0; } @@ -164,10 +159,6 @@ static int mon_send_reply(struct mon_msg *monmsg, { int rc; - P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " - "0x%08X\n\n", - monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); - rc = iucv_message_reply(monpriv->path, &monmsg->msg, IUCV_IPRMDATA, NULL, 0); atomic_dec(&monpriv->msglim_count); @@ -202,15 +193,12 @@ static struct mon_private *mon_alloc_mem(void) struct mon_private *monpriv; monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); - if (!monpriv) { - P_ERROR("no memory for monpriv\n"); + if (!monpriv) return NULL; - } for (i = 0; i < MON_MSGLIM; i++) { monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), GFP_KERNEL); if (!monpriv->msg_array[i]) { - P_ERROR("open, no memory for msg_array\n"); mon_free_mem(monpriv); return NULL; } @@ -218,41 +206,10 @@ static struct mon_private *mon_alloc_mem(void) return monpriv; } -static inline void mon_read_debug(struct mon_msg *monmsg, - struct mon_private *monpriv) -{ -#ifdef MON_DEBUG - u8 msg_type[2], mca_type; - unsigned long records_len; - - records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; - - memcpy(msg_type, &monmsg->msg.class, 2); - EBCASC(msg_type, 2); - mca_type = mon_mca_type(monmsg, 0); - EBCASC(&mca_type, 1); - - P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", - monpriv->read_index, monpriv->write_index); - P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", - monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); - P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", - msg_type[0], msg_type[1], mca_type ? 
mca_type : 'X', - mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); - P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n", - mon_mca_start(monmsg), mon_mca_end(monmsg)); - P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n", - mon_rec_start(monmsg), mon_rec_end(monmsg), records_len); - if (mon_mca_size(monmsg) > 12) - P_DEBUG("READ, MORE THAN ONE MCA\n\n"); -#endif -} - static inline void mon_next_mca(struct mon_msg *monmsg) { if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) return; - P_DEBUG("READ, NEXT MCA\n\n"); monmsg->mca_offset += 12; monmsg->pos = 0; } @@ -269,7 +226,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv) monmsg->msglim_reached = 0; monmsg->pos = 0; monmsg->mca_offset = 0; - P_WARNING("read, message limit reached\n"); monpriv->read_index = (monpriv->read_index + 1) % MON_MSGLIM; atomic_dec(&monpriv->read_ready); @@ -286,10 +242,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) { struct mon_private *monpriv = path->private; - P_DEBUG("IUCV connection completed\n"); - P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " - "0x%02X, Sample = 0x%02X\n", - ipuser[0], ipuser[1], ipuser[2]); atomic_set(&monpriv->iucv_connected, 1); wake_up(&mon_conn_wait_queue); } @@ -310,7 +262,6 @@ static void mon_iucv_message_pending(struct iucv_path *path, { struct mon_private *monpriv = path->private; - P_DEBUG("IUCV message pending\n"); memcpy(&monpriv->msg_array[monpriv->write_index]->msg, msg, sizeof(*msg)); if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { @@ -375,7 +326,6 @@ static int mon_open(struct inode *inode, struct file *filp) rc = -EIO; goto out_path; } - P_INFO("open, established connection to *MONITOR service\n\n"); filp->private_data = monpriv; return nonseekable_open(inode, filp); @@ -400,8 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp) rc = iucv_path_sever(monpriv->path, user_data_sever); if (rc) P_ERROR("close, iucv_sever failed with rc = %i\n", rc); - else - P_INFO("close, terminated connection to *MONITOR service\n"); atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_connected, 0); @@ -442,10 +390,8 @@ static ssize_t mon_read(struct file *filp, char __user *data, monmsg = monpriv->msg_array[monpriv->read_index]; } - if (!monmsg->pos) { + if (!monmsg->pos) monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; - mon_read_debug(monmsg, monpriv); - } if (mon_check_mca(monmsg)) goto reply; @@ -531,7 +477,6 @@ static int __init mon_init(void) P_ERROR("failed to register with iucv driver\n"); return rc; } - P_INFO("open, registered with IUCV\n"); rc = segment_type(mon_dcss_name); if (rc < 0) { @@ -555,13 +500,8 @@ static int __init mon_init(void) dcss_mkname(mon_dcss_name, &user_data_connect[8]); rc = misc_register(&mon_dev); - if (rc < 0 ) { - P_ERROR("misc_register failed, rc = %i\n", rc); + if (rc < 0 ) goto out; - } - P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n", - mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end, - mon_dcss_end - mon_dcss_start + 1); return 0; out: diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 848ef7e8523..81a96e01908 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size) struct raw3270_request *rq; rq = alloc_bootmem_low(sizeof(struct raw3270)); - if (!rq) - return ERR_PTR(-ENOMEM); - memset(rq, 0, sizeof(struct raw3270_request)); /* alloc 
output buffer. */ - if (size > 0) { + if (size > 0) rq->buffer = alloc_bootmem_low(size); - if (!rq->buffer) { - free_bootmem((unsigned long) rq, - sizeof(struct raw3270)); - return ERR_PTR(-ENOMEM); - } - } rq->size = size; INIT_LIST_HEAD(&rq->list); @@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) if (IS_ERR(irb)) rc = RAW3270_IO_RETRY; - else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { + else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { rq->rc = -EIO; rc = RAW3270_IO_DONE; - } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | - DEV_STAT_UNIT_EXCEP)) { + } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | + DEV_STAT_UNIT_EXCEP)) { /* Handle CE-DE-UE and subsequent UDE */ set_bit(RAW3270_FLAGS_BUSY, &rp->flags); rc = RAW3270_IO_BUSY; } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { /* Wait for UDE if busy flag is set. */ - if (irb->scsw.dstat & DEV_STAT_DEV_END) { + if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); /* Got it, now retry. */ rc = RAW3270_IO_RETRY; @@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, * Unit-Check Processing: * Expect Command Reject or Intervention Required. */ - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { /* Request finished abnormally. */ if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); @@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, } } if (rq) { - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { if (irb->ecw[0] & SNS0_CMD_REJECT) rq->rc = -EOPNOTSUPP; else rq->rc = -EIO; } else /* Request finished normally. Copy residual count. */ - rq->rescnt = irb->scsw.count; + rq->rescnt = irb->scsw.cmd.count; } - if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); wake_up(&raw3270_wait_queue); } @@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp) rp->cols = 132; break; default: - printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model); rc = -EOPNOTSUPP; break; } diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 2c7a1ee6b04..3c8b25e6c34 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf) if (scbuf->validity_sclp_send_mask) sclp_send_mask = scbuf->sclp_send_mask; spin_unlock_irqrestore(&sclp_lock, flags); + if (scbuf->validity_sclp_active_facility_mask) + sclp_facilities = scbuf->sclp_active_facility_mask; sclp_dispatch_state_change(); } @@ -782,11 +784,9 @@ sclp_check_handler(__u16 code) /* Is this the interrupt we are waiting for? 
*/ if (finished_sccb == 0) return; - if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { - printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " - "for buffer at 0x%x\n", finished_sccb); - return; - } + if (finished_sccb != (u32) (addr_t) sclp_init_sccb) + panic("sclp: unsolicited interrupt for buffer at 0x%x\n", + finished_sccb); spin_lock(&sclp_lock); if (sclp_running_state == sclp_running_state_running) { sclp_init_req.status = SCLP_REQ_DONE; @@ -883,8 +883,6 @@ sclp_init(void) unsigned long flags; int rc; - if (!MACHINE_HAS_SCLP) - return -ENODEV; spin_lock_irqsave(&sclp_lock, flags); /* Check for previous or running initialization */ if (sclp_init_state != sclp_init_state_uninitialized) { diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index b5c23396f8f..0c2b77493db 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -11,6 +11,9 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/memory.h> #include <asm/chpid.h> #include <asm/sclp.h> #include "sclp.h" @@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid; u64 sclp_facilities; static u8 sclp_fac84; +static unsigned long long rzm; +static unsigned long long rnmax; static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) { @@ -62,7 +67,7 @@ out: return rc; } -void __init sclp_read_info_early(void) +static void __init sclp_read_info_early(void) { int rc; int i; @@ -92,34 +97,33 @@ void __init sclp_read_info_early(void) void __init sclp_facilities_detect(void) { + struct read_info_sccb *sccb; + + sclp_read_info_early(); if (!early_read_info_sccb_valid) return; - sclp_facilities = early_read_info_sccb.facilities; - sclp_fac84 = early_read_info_sccb.fac84; + + sccb = &early_read_info_sccb; + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; + rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; + rzm <<= 20; } -unsigned long long __init sclp_memory_detect(void) +unsigned long long sclp_get_rnmax(void) { - unsigned long long memsize; - struct read_info_sccb *sccb; + return rnmax; +} - if (!early_read_info_sccb_valid) - return 0; - sccb = &early_read_info_sccb; - if (sccb->rnsize) - memsize = sccb->rnsize << 20; - else - memsize = sccb->rnsize2 << 20; - if (sccb->rnmax) - memsize *= sccb->rnmax; - else - memsize *= sccb->rnmax2; - return memsize; +unsigned long long sclp_get_rzm(void) +{ + return rzm; } /* - * This function will be called after sclp_memory_detect(), which gets called - * early from early.c code. Therefore the sccb should have valid contents. + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. Therefore the sccb should have valid contents. 
*/ void __init sclp_get_ipl_info(struct sclp_ipl_info *info) { @@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu) return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); } +#ifdef CONFIG_MEMORY_HOTPLUG + +static DEFINE_MUTEX(sclp_mem_mutex); +static LIST_HEAD(sclp_mem_list); +static u8 sclp_max_storage_id; +static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; + +struct memory_increment { + struct list_head list; + u16 rn; + int standby; + int usecount; +}; + +struct assign_storage_sccb { + struct sccb_header header; + u16 rn; +} __packed; + +static unsigned long long rn2addr(u16 rn) +{ + return (unsigned long long) (rn - 1) * rzm; +} + +static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) +{ + struct assign_storage_sccb *sccb; + int rc; + + sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + sccb->header.length = PAGE_SIZE; + sccb->rn = rn; + rc = do_sync_request(cmd, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: + case 0x0120: + break; + default: + rc = -EIO; + break; + } +out: + free_page((unsigned long) sccb); + return rc; +} + +static int sclp_assign_storage(u16 rn) +{ + return do_assign_storage(0x000d0001, rn); +} + +static int sclp_unassign_storage(u16 rn) +{ + return do_assign_storage(0x000c0001, rn); +} + +struct attach_storage_sccb { + struct sccb_header header; + u16 :16; + u16 assigned; + u32 :32; + u32 entries[0]; +} __packed; + +static int sclp_attach_storage(u8 id) +{ + struct attach_storage_sccb *sccb; + int rc; + int i; + + sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + sccb->header.length = PAGE_SIZE; + rc = do_sync_request(0x00080001 | id << 8, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: + set_bit(id, sclp_storage_ids); + for (i = 0; i < sccb->assigned; i++) + sclp_unassign_storage(sccb->entries[i] >> 16); + break; + default: + rc = -EIO; + break; + } +out: + free_page((unsigned long) sccb); + return rc; +} + +static int sclp_mem_change_state(unsigned long start, unsigned long size, + int online) +{ + struct memory_increment *incr; + unsigned long long istart; + int rc = 0; + + list_for_each_entry(incr, &sclp_mem_list, list) { + istart = rn2addr(incr->rn); + if (start + size - 1 < istart) + break; + if (start > istart + rzm - 1) + continue; + if (online) { + if (incr->usecount++) + continue; + /* + * Don't break the loop if one assign fails. Loop may + * be walked again on CANCEL and we can't save + * information if state changed before or not. + * So continue and increase usecount for all increments. + */ + rc |= sclp_assign_storage(incr->rn); + } else { + if (--incr->usecount) + continue; + sclp_unassign_storage(incr->rn); + } + } + return rc ? -EIO : 0; +} + +static int sclp_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + unsigned long start, size; + struct memory_notify *arg; + unsigned char id; + int rc = 0; + + arg = data; + start = arg->start_pfn << PAGE_SHIFT; + size = arg->nr_pages << PAGE_SHIFT; + mutex_lock(&sclp_mem_mutex); + for (id = 0; id <= sclp_max_storage_id; id++) + if (!test_bit(id, sclp_storage_ids)) + sclp_attach_storage(id); + switch (action) { + case MEM_ONLINE: + break; + case MEM_GOING_ONLINE: + rc = sclp_mem_change_state(start, size, 1); + break; + case MEM_CANCEL_ONLINE: + sclp_mem_change_state(start, size, 0); + break; + default: + rc = -EINVAL; + break; + } + mutex_unlock(&sclp_mem_mutex); + return rc ? 
NOTIFY_BAD : NOTIFY_OK; +} + +static struct notifier_block sclp_mem_nb = { + .notifier_call = sclp_mem_notifier, +}; + +static void __init add_memory_merged(u16 rn) +{ + static u16 first_rn, num; + unsigned long long start, size; + + if (rn && first_rn && (first_rn + num == rn)) { + num++; + return; + } + if (!first_rn) + goto skip_add; + start = rn2addr(first_rn); + size = (unsigned long long ) num * rzm; + if (start >= VMEM_MAX_PHYS) + goto skip_add; + if (start + size > VMEM_MAX_PHYS) + size = VMEM_MAX_PHYS - start; + add_memory(0, start, size); +skip_add: + first_rn = rn; + num = 1; +} + +static void __init sclp_add_standby_memory(void) +{ + struct memory_increment *incr; + + list_for_each_entry(incr, &sclp_mem_list, list) + if (incr->standby) + add_memory_merged(incr->rn); + add_memory_merged(0); +} + +static void __init insert_increment(u16 rn, int standby, int assigned) +{ + struct memory_increment *incr, *new_incr; + struct list_head *prev; + u16 last_rn; + + new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); + if (!new_incr) + return; + new_incr->rn = rn; + new_incr->standby = standby; + last_rn = 0; + prev = &sclp_mem_list; + list_for_each_entry(incr, &sclp_mem_list, list) { + if (assigned && incr->rn > rn) + break; + if (!assigned && incr->rn - last_rn > 1) + break; + last_rn = incr->rn; + prev = &incr->list; + } + if (!assigned) + new_incr->rn = last_rn + 1; + if (new_incr->rn > rnmax) { + kfree(new_incr); + return; + } + list_add(&new_incr->list, prev); +} + +struct read_storage_sccb { + struct sccb_header header; + u16 max_id; + u16 assigned; + u16 standby; + u16 :16; + u32 entries[0]; +} __packed; + +static int __init sclp_detect_standby_memory(void) +{ + struct read_storage_sccb *sccb; + int i, id, assigned, rc; + + if (!early_read_info_sccb_valid) + return 0; + if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) + return 0; + rc = -ENOMEM; + sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + goto out; + assigned = 0; + for (id = 0; id <= sclp_max_storage_id; id++) { + memset(sccb, 0, PAGE_SIZE); + sccb->header.length = PAGE_SIZE; + rc = do_sync_request(0x00040001 | id << 8, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0010: + set_bit(id, sclp_storage_ids); + for (i = 0; i < sccb->assigned; i++) { + if (!sccb->entries[i]) + continue; + assigned++; + insert_increment(sccb->entries[i] >> 16, 0, 1); + } + break; + case 0x0310: + break; + case 0x0410: + for (i = 0; i < sccb->assigned; i++) { + if (!sccb->entries[i]) + continue; + assigned++; + insert_increment(sccb->entries[i] >> 16, 1, 1); + } + break; + default: + rc = -EIO; + break; + } + if (!rc) + sclp_max_storage_id = sccb->max_id; + } + if (rc || list_empty(&sclp_mem_list)) + goto out; + for (i = 1; i <= rnmax - assigned; i++) + insert_increment(0, 1, 0); + rc = register_memory_notifier(&sclp_mem_nb); + if (rc) + goto out; + sclp_add_standby_memory(); +out: + free_page((unsigned long) sccb); + return rc; +} +__initcall(sclp_detect_standby_memory); + +#endif /* CONFIG_MEMORY_HOTPLUG */ + /* * Channel path configuration related functions. 
*/ diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index ead1043d788..7e619c534bf 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -14,14 +14,13 @@ #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/bootmem.h> +#include <linux/termios.h> #include <linux/err.h> #include "sclp.h" #include "sclp_rw.h" #include "sclp_tty.h" -#define SCLP_CON_PRINT_HEADER "sclp console driver: " - #define sclp_console_major 4 /* TTYAUX_MAJOR */ #define sclp_console_minor 64 #define sclp_console_name "ttyS" @@ -222,8 +221,6 @@ sclp_console_init(void) INIT_LIST_HEAD(&sclp_con_pages); for (i = 0; i < MAX_CONSOLE_PAGES; i++) { page = alloc_bootmem_low_pages(PAGE_SIZE); - if (page == NULL) - return -ENOMEM; list_add_tail((struct list_head *) page, &sclp_con_pages); } INIT_LIST_HEAD(&sclp_con_outqueue); diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index ad05a87bc48..fff4ff485d9 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/cpu.h> +#include <linux/kthread.h> #include <linux/sysdev.h> #include <linux/workqueue.h> #include <asm/smp.h> @@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work) put_online_cpus(); } -static void __ref sclp_cpu_change_notify(struct work_struct *work) +static int sclp_cpu_kthread(void *data) { smp_rescan_cpus(); + return 0; +} + +static void __ref sclp_cpu_change_notify(struct work_struct *work) +{ + /* Can't call smp_rescan_cpus() from workqueue context since it may + * deadlock in case of cpu hotplug. So we have to create a kernel + * thread in order to call it. + */ + kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan"); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) @@ -74,10 +85,8 @@ static int __init sclp_conf_init(void) INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); rc = sclp_register(&sclp_conf_register); - if (rc) { - printk(KERN_ERR TAG "failed to register (%d).\n", rc); + if (rc) return rc; - } if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { printk(KERN_WARNING TAG "no configuration management.\n"); diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index 9f37456222e..d887bd261d2 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c @@ -27,6 +27,8 @@ #define CPI_LENGTH_NAME 8 #define CPI_LENGTH_LEVEL 16 +static DEFINE_MUTEX(sclp_cpi_mutex); + struct cpi_evbuf { struct evbuf_header header; u8 id_format; @@ -124,21 +126,15 @@ static int cpi_req(void) int response; rc = sclp_register(&sclp_cpi_event); - if (rc) { - printk(KERN_WARNING "cpi: could not register " - "to hardware console.\n"); + if (rc) goto out; - } if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { - printk(KERN_WARNING "cpi: no control program " - "identification support\n"); rc = -EOPNOTSUPP; goto out_unregister; } req = cpi_prepare_req(); if (IS_ERR(req)) { - printk(KERN_WARNING "cpi: could not allocate request\n"); rc = PTR_ERR(req); goto out_unregister; } @@ -148,10 +144,8 @@ static int cpi_req(void) /* Add request to sclp queue */ rc = sclp_add_request(req); - if (rc) { - printk(KERN_WARNING "cpi: could not start request\n"); + if (rc) goto out_free_req; - } wait_for_completion(&completion); @@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value) static ssize_t system_name_show(struct kobject *kobj, struct kobj_attribute 
*attr, char *page) { - return snprintf(page, PAGE_SIZE, "%s\n", system_name); + int rc; + + mutex_lock(&sclp_cpi_mutex); + rc = snprintf(page, PAGE_SIZE, "%s\n", system_name); + mutex_unlock(&sclp_cpi_mutex); + return rc; } static ssize_t system_name_store(struct kobject *kobj, @@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj, if (rc) return rc; + mutex_lock(&sclp_cpi_mutex); set_string(system_name, buf); + mutex_unlock(&sclp_cpi_mutex); return len; } @@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr = static ssize_t sysplex_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); + int rc; + + mutex_lock(&sclp_cpi_mutex); + rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); + mutex_unlock(&sclp_cpi_mutex); + return rc; } static ssize_t sysplex_name_store(struct kobject *kobj, @@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj, if (rc) return rc; + mutex_lock(&sclp_cpi_mutex); set_string(sysplex_name, buf); + mutex_unlock(&sclp_cpi_mutex); return len; } @@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr = static ssize_t system_type_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%s\n", system_type); + int rc; + + mutex_lock(&sclp_cpi_mutex); + rc = snprintf(page, PAGE_SIZE, "%s\n", system_type); + mutex_unlock(&sclp_cpi_mutex); + return rc; } static ssize_t system_type_store(struct kobject *kobj, @@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj, if (rc) return rc; + mutex_lock(&sclp_cpi_mutex); set_string(system_type, buf); + mutex_unlock(&sclp_cpi_mutex); return len; } @@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr = static ssize_t system_level_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - unsigned long long level = system_level; + unsigned long long level; + mutex_lock(&sclp_cpi_mutex); + level = system_level; + mutex_unlock(&sclp_cpi_mutex); return snprintf(page, PAGE_SIZE, "%#018llx\n", level); } @@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj, if (*endp) return -EINVAL; + mutex_lock(&sclp_cpi_mutex); system_level = level; - + mutex_unlock(&sclp_cpi_mutex); return len; } @@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj, { int rc; + mutex_lock(&sclp_cpi_mutex); rc = cpi_req(); + mutex_unlock(&sclp_cpi_mutex); if (rc) return rc; @@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type, if (rc) return rc; + mutex_lock(&sclp_cpi_mutex); set_string(system_name, system); set_string(sysplex_name, sysplex); set_string(system_type, type); system_level = level; - return cpi_req(); + rc = cpi_req(); + mutex_unlock(&sclp_cpi_mutex); + + return rc; } EXPORT_SYMBOL(sclp_cpi_set_data); diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 45ff25e787c..84c191c1cd6 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = { static int __init sclp_quiesce_init(void) { - int rc; - - rc = sclp_register(&sclp_quiesce_event); - if (rc) - printk(KERN_WARNING "sclp: could not register quiesce handler " - "(rc=%d)\n", rc); - return rc; + return sclp_register(&sclp_quiesce_event); } module_init(sclp_quiesce_init); diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 
da09781b32f..710af42603f 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -19,8 +19,6 @@ #include "sclp.h" #include "sclp_rw.h" -#define SCLP_RW_PRINT_HEADER "sclp low level driver: " - /* * The room for the SCCB (only for writing) is not equal to a pages size * (as it is specified as the maximum size in the SCLP documentation) diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 1c064976b32..8b854857ba0 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -239,10 +239,8 @@ int __init sclp_sdias_init(void) debug_register_view(sdias_dbf, &debug_sprintf_view); debug_set_level(sdias_dbf, 6); rc = sclp_register(&sclp_sdias_register); - if (rc) { - ERROR_MSG("sclp register failed\n"); + if (rc) return rc; - } init_waitqueue_head(&sdias_wq); TRACE("init done\n"); return 0; diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 40b11521cd2..434ba04b130 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -13,7 +13,6 @@ #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> -#include <linux/wait.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/init.h> @@ -25,8 +24,6 @@ #include "sclp_rw.h" #include "sclp_tty.h" -#define SCLP_TTY_PRINT_HEADER "sclp tty driver: " - /* * size of a buffer that collects single characters coming in * via sclp_tty_put_char() @@ -50,8 +47,6 @@ static int sclp_tty_buffer_count; static struct sclp_buffer *sclp_ttybuf; /* Timer for delayed output of console messages. */ static struct timer_list sclp_tty_timer; -/* Waitqueue to wait for buffers to get empty. */ -static wait_queue_head_t sclp_tty_waitq; static struct tty_struct *sclp_tty; static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; @@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count; struct tty_driver *sclp_tty_driver; -static struct sclp_ioctls sclp_ioctls; -static struct sclp_ioctls sclp_ioctls_init = -{ - 8, /* 1 hor. tab. = 8 spaces */ - 0, /* no echo of input by this driver */ - 80, /* 80 characters/line */ - 1, /* write after 1/10 s without final new line */ - MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */ - MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */ - 0, /* do not convert to lower case */ - 0x6c /* to seprate upper and lower case */ - /* ('%' in EBCDIC) */ -}; +static int sclp_tty_tolower; +static int sclp_tty_columns = 80; + +#define SPACES_PER_TAB 8 +#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */ /* This routine is called whenever we try to open a SCLP terminal. 
*/ static int @@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp) sclp_tty = NULL; } -/* execute commands to control the i/o behaviour of the SCLP tty at runtime */ -static int -sclp_tty_ioctl(struct tty_struct *tty, struct file * file, - unsigned int cmd, unsigned long arg) -{ - unsigned long flags; - unsigned int obuf; - int check; - int rc; - - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - rc = 0; - check = 0; - switch (cmd) { - case TIOCSCLPSHTAB: - /* set width of horizontal tab */ - if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg)) - rc = -EFAULT; - else - check = 1; - break; - case TIOCSCLPGHTAB: - /* get width of horizontal tab */ - if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSECHO: - /* enable/disable echo of input */ - if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPGECHO: - /* Is echo of input enabled ? */ - if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSCOLS: - /* set number of columns for output */ - if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg)) - rc = -EFAULT; - else - check = 1; - break; - case TIOCSCLPGCOLS: - /* get number of columns for output */ - if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSNL: - /* enable/disable writing without final new line character */ - if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPGNL: - /* Is writing without final new line character enabled ? */ - if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSOBUF: - /* - * set the maximum buffers size for output, will be rounded - * up to next 4kB boundary and stored as number of SCCBs - * (4kB Buffers) limitation: 256 x 4kB - */ - if (get_user(obuf, (unsigned int __user *) arg) == 0) { - if (obuf & 0xFFF) - sclp_ioctls.max_sccb = (obuf >> 12) + 1; - else - sclp_ioctls.max_sccb = (obuf >> 12); - } else - rc = -EFAULT; - break; - case TIOCSCLPGOBUF: - /* get the maximum buffers size for output */ - obuf = sclp_ioctls.max_sccb << 12; - if (put_user(obuf, (unsigned int __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPGKBUF: - /* get the number of buffers got from kernel at startup */ - if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSCASE: - /* enable/disable conversion from upper to lower case */ - if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPGCASE: - /* Is conversion from upper to lower case of input enabled? 
*/ - if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSDELIM: - /* - * set special character used for separating upper and - * lower case, 0x00 disables this feature - */ - if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPGDELIM: - /* - * get special character used for separating upper and - * lower case, 0x00 disables this feature - */ - if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg)) - rc = -EFAULT; - break; - case TIOCSCLPSINIT: - /* set initial (default) sclp ioctls */ - sclp_ioctls = sclp_ioctls_init; - check = 1; - break; - default: - rc = -ENOIOCTLCMD; - break; - } - if (check) { - spin_lock_irqsave(&sclp_tty_lock, flags); - if (sclp_ttybuf != NULL) { - sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab); - sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns); - } - spin_unlock_irqrestore(&sclp_tty_lock, flags); - } - return rc; -} - /* * This routine returns the numbers of characters the tty driver * will accept for queuing to be written. This number is subject @@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) struct sclp_buffer, list); spin_unlock_irqrestore(&sclp_tty_lock, flags); } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); - wake_up(&sclp_tty_waitq); /* check if the tty needs a wake up call */ if (sclp_tty != NULL) { tty_wakeup(sclp_tty); @@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data) /* * Write a string to the sclp tty. */ -static void -sclp_tty_write_string(const unsigned char *str, int count) +static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail) { unsigned long flags; void *page; int written; + int overall_written; struct sclp_buffer *buf; if (count <= 0) - return; + return 0; + overall_written = 0; spin_lock_irqsave(&sclp_tty_lock, flags); do { /* Create a sclp output buffer if none exists yet */ if (sclp_ttybuf == NULL) { while (list_empty(&sclp_tty_pages)) { spin_unlock_irqrestore(&sclp_tty_lock, flags); - if (in_interrupt()) - sclp_sync_wait(); + if (may_fail) + goto out; else - wait_event(sclp_tty_waitq, - !list_empty(&sclp_tty_pages)); + sclp_sync_wait(); spin_lock_irqsave(&sclp_tty_lock, flags); } page = sclp_tty_pages.next; list_del((struct list_head *) page); - sclp_ttybuf = sclp_make_buffer(page, - sclp_ioctls.columns, - sclp_ioctls.htab); + sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns, + SPACES_PER_TAB); } /* try to write the string to the current output buffer */ written = sclp_write(sclp_ttybuf, str, count); + overall_written += written; if (written == count) break; /* @@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count) count -= written; } while (count > 0); /* Setup timer to output current console buffer after 1/10 second */ - if (sclp_ioctls.final_nl) { - if (sclp_ttybuf != NULL && - sclp_chars_in_buffer(sclp_ttybuf) != 0 && - !timer_pending(&sclp_tty_timer)) { - init_timer(&sclp_tty_timer); - sclp_tty_timer.function = sclp_tty_timeout; - sclp_tty_timer.data = 0UL; - sclp_tty_timer.expires = jiffies + HZ/10; - add_timer(&sclp_tty_timer); - } - } else { - if (sclp_ttybuf != NULL && - sclp_chars_in_buffer(sclp_ttybuf) != 0) { - buf = sclp_ttybuf; - sclp_ttybuf = NULL; - spin_unlock_irqrestore(&sclp_tty_lock, flags); - __sclp_ttybuf_emit(buf); - spin_lock_irqsave(&sclp_tty_lock, flags); - } + if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) && + !timer_pending(&sclp_tty_timer)) { + init_timer(&sclp_tty_timer); + 
sclp_tty_timer.function = sclp_tty_timeout; + sclp_tty_timer.data = 0UL; + sclp_tty_timer.expires = jiffies + HZ/10; + add_timer(&sclp_tty_timer); } spin_unlock_irqrestore(&sclp_tty_lock, flags); +out: + return overall_written; } /* @@ -395,11 +241,10 @@ static int sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { if (sclp_tty_chars_count > 0) { - sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); sclp_tty_chars_count = 0; } - sclp_tty_write_string(buf, count); - return count; + return sclp_tty_write_string(buf, count, 1); } /* @@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) { sclp_tty_chars[sclp_tty_chars_count++] = ch; if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { - sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); sclp_tty_chars_count = 0; - } return 1; + } + return 1; } /* @@ -430,7 +276,7 @@ static void sclp_tty_flush_chars(struct tty_struct *tty) { if (sclp_tty_chars_count > 0) { - sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); sclp_tty_chars_count = 0; } } @@ -469,7 +315,7 @@ static void sclp_tty_flush_buffer(struct tty_struct *tty) { if (sclp_tty_chars_count > 0) { - sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); sclp_tty_chars_count = 0; } } @@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count) * modifiy original string, * returns length of resulting string */ -static int -sclp_switch_cases(unsigned char *buf, int count, - unsigned char delim, int tolower) +static int sclp_switch_cases(unsigned char *buf, int count) { unsigned char *ip, *op; int toggle; @@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count, ip = op = buf; while (count-- > 0) { /* compare with special character */ - if (*ip == delim) { + if (*ip == CASE_DELIMITER) { /* followed by another special character? */ - if (count && ip[1] == delim) { + if (count && ip[1] == CASE_DELIMITER) { /* * ... 
then put a single copy of the special * character to the output string @@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count, /* not the special character */ if (toggle) /* but case switching is on */ - if (tolower) + if (sclp_tty_tolower) /* switch to uppercase */ *op++ = _ebc_toupper[(int) *ip++]; else @@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end) int count; count = end - start; - /* - * if set in ioctl convert EBCDIC to lower case - * (modify original input in SCCB) - */ - if (sclp_ioctls.tolower) + if (sclp_tty_tolower) EBC_TOLOWER(start, count); - - /* - * if set in ioctl find out characters in lower or upper case - * (depends on current case) separated by a special character, - * works on EBCDIC - */ - if (sclp_ioctls.delim) - count = sclp_switch_cases(start, count, - sclp_ioctls.delim, - sclp_ioctls.tolower); - + count = sclp_switch_cases(start, count); /* convert EBCDIC to ASCII (modify original input in SCCB) */ sclp_ebcasc_str(start, count); - /* if set in ioctl write operators input to console */ - if (sclp_ioctls.echo) - sclp_tty_write(sclp_tty, start, count); - /* transfer input to high level driver */ sclp_tty_input(start, count); } @@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = { .write_room = sclp_tty_write_room, .chars_in_buffer = sclp_tty_chars_in_buffer, .flush_buffer = sclp_tty_flush_buffer, - .ioctl = sclp_tty_ioctl, }; static int __init @@ -736,9 +561,6 @@ sclp_tty_init(void) rc = sclp_rw_init(); if (rc) { - printk(KERN_ERR SCLP_TTY_PRINT_HEADER - "could not register tty - " - "sclp_rw_init returned %d\n", rc); put_tty_driver(driver); return rc; } @@ -754,7 +576,6 @@ sclp_tty_init(void) } INIT_LIST_HEAD(&sclp_tty_outqueue); spin_lock_init(&sclp_tty_lock); - init_waitqueue_head(&sclp_tty_waitq); init_timer(&sclp_tty_timer); sclp_ttybuf = NULL; sclp_tty_buffer_count = 0; @@ -763,11 +584,10 @@ sclp_tty_init(void) * save 4 characters for the CPU number * written at start of each line by VM/CP */ - sclp_ioctls_init.columns = 76; + sclp_tty_columns = 76; /* case input lines to lowercase */ - sclp_ioctls_init.tolower = 1; + sclp_tty_tolower = 1; } - sclp_ioctls = sclp_ioctls_init; sclp_tty_chars_count = 0; sclp_tty = NULL; @@ -792,9 +612,6 @@ sclp_tty_init(void) tty_set_operations(driver, &sclp_ops); rc = tty_register_driver(driver); if (rc) { - printk(KERN_ERR SCLP_TTY_PRINT_HEADER - "could not register tty - " - "tty_register_driver returned %d\n", rc); put_tty_driver(driver); return rc; } diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h index 0ce2c1fc534..4b965b22fec 100644 --- a/drivers/s390/char/sclp_tty.h +++ b/drivers/s390/char/sclp_tty.h @@ -11,61 +11,8 @@ #ifndef __SCLP_TTY_H__ #define __SCLP_TTY_H__ -#include <linux/ioctl.h> -#include <linux/termios.h> #include <linux/tty_driver.h> -/* This is the type of data structures storing sclp ioctl setting. 
*/ -struct sclp_ioctls { - unsigned short htab; - unsigned char echo; - unsigned short columns; - unsigned char final_nl; - unsigned short max_sccb; - unsigned short kmem_sccb; /* can't be modified at run time */ - unsigned char tolower; - unsigned char delim; -}; - -/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */ -#define SCLP_IOCTL_LETTER 'B' - -/* set width of horizontal tabulator */ -#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short) -/* enable/disable echo of input (independent from line discipline) */ -#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char) -/* set number of colums for output */ -#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short) -/* enable/disable writing without final new line character */ -#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char) -/* set the maximum buffers size for output, rounded up to next 4kB boundary */ -#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short) -/* set initial (default) sclp ioctls */ -#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6) -/* enable/disable conversion from upper to lower case of input */ -#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char) -/* set special character used for separating upper and lower case, */ -/* 0x00 disables this feature */ -#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char) - -/* get width of horizontal tabulator */ -#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short) -/* Is echo of input enabled ? (independent from line discipline) */ -#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char) -/* get number of colums for output */ -#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short) -/* Is writing without final new line character enabled ? */ -#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char) -/* get the maximum buffers size for output */ -#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short) -/* Is conversion from upper to lower case of input enabled ? */ -#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char) -/* get special character used for separating upper and lower case, */ -/* 0x00 disables this feature */ -#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char) -/* get the number of buffers/pages got from kernel at startup */ -#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short) - extern struct tty_driver *sclp_tty_driver; #endif /* __SCLP_TTY_H__ */ diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 3e577f655b1..ad51738c426 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -27,7 +27,6 @@ #include <asm/uaccess.h> #include "sclp.h" -#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: " #define SCLP_VT220_MAJOR TTY_MAJOR #define SCLP_VT220_MINOR 65 #define SCLP_VT220_DRIVER_NAME "sclp_vt220" @@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request; /* Number of characters in current request buffer */ static int sclp_vt220_buffered_chars; -/* Flag indicating whether this driver has already been initialized */ -static int sclp_vt220_initialized = 0; +/* Counter controlling core driver initialization. 
*/ +static int __initdata sclp_vt220_init_count; /* Flag indicating that sclp_vt220_current_request should really * have been already queued but wasn't because the SCLP was processing @@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty) sclp_vt220_emit_current(); } -/* - * Initialize all relevant components and register driver with system. - */ -static void __init __sclp_vt220_cleanup(void) +/* Release allocated pages. */ +static void __init __sclp_vt220_free_pages(void) { struct list_head *page, *p; @@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void) else free_bootmem((unsigned long) page, PAGE_SIZE); } - if (!list_empty(&sclp_vt220_register.list)) - sclp_unregister(&sclp_vt220_register); - sclp_vt220_initialized = 0; } -static int __init __sclp_vt220_init(void) +/* Release memory and unregister from sclp core. Controlled by init counting - + * only the last invoker will actually perform these actions. */ +static void __init __sclp_vt220_cleanup(void) +{ + sclp_vt220_init_count--; + if (sclp_vt220_init_count != 0) + return; + sclp_unregister(&sclp_vt220_register); + __sclp_vt220_free_pages(); +} + +/* Allocate buffer pages and register with sclp core. Controlled by init + * counting - only the first invoker will actually perform these actions. */ +static int __init __sclp_vt220_init(int num_pages) { void *page; int i; - int num_pages; int rc; - if (sclp_vt220_initialized) + sclp_vt220_init_count++; + if (sclp_vt220_init_count != 1) return 0; - sclp_vt220_initialized = 1; spin_lock_init(&sclp_vt220_lock); INIT_LIST_HEAD(&sclp_vt220_empty); INIT_LIST_HEAD(&sclp_vt220_outqueue); @@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void) sclp_vt220_flush_later = 0; /* Allocate pages for output buffering */ - num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES; for (i = 0; i < num_pages; i++) { if (slab_is_available()) page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); else page = alloc_bootmem_low_pages(PAGE_SIZE); if (!page) { - __sclp_vt220_cleanup(); - return -ENOMEM; + rc = -ENOMEM; + goto out; } list_add_tail((struct list_head *) page, &sclp_vt220_empty); } rc = sclp_register(&sclp_vt220_register); +out: if (rc) { - printk(KERN_ERR SCLP_VT220_PRINT_HEADER - "could not register vt220 - " - "sclp_register returned %d\n", rc); - __sclp_vt220_cleanup(); + __sclp_vt220_free_pages(); + sclp_vt220_init_count--; } return rc; } @@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void) { struct tty_driver *driver; int rc; - int cleanup; /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve * symmetry between VM and LPAR systems regarding ttyS1. 
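The sclp_vt220 rework above replaces the old sclp_vt220_initialized flag with an init counter so that both the early console setup and the later tty setup may call __sclp_vt220_init()/__sclp_vt220_cleanup(): only the first call allocates and registers, only the last call tears everything down. Below is a generic sketch of that first-in/last-out pattern; the names are placeholders, and real kernel code would also need locking if the callers could race.

#include <stdio.h>
#include <stdlib.h>

static int init_count;		/* number of users that called demo_init() */
static void *demo_resource;	/* placeholder for buffer pages, registrations, ... */

/* Only the first caller allocates and registers; later callers just bump
 * the counter.  On failure the counter is rolled back so a later attempt
 * can try again. */
static int demo_init(void)
{
	init_count++;
	if (init_count != 1)
		return 0;
	demo_resource = malloc(4096);
	if (!demo_resource) {
		init_count--;
		return -1;
	}
	return 0;
}

/* Only the last caller actually releases the resources. */
static void demo_cleanup(void)
{
	init_count--;
	if (init_count != 0)
		return;
	free(demo_resource);
	demo_resource = NULL;
}

int main(void)
{
	demo_init();	/* early console setup */
	demo_init();	/* later tty driver setup */
	demo_cleanup();	/* resources stay around ... */
	demo_cleanup();	/* ... until the last user is gone */
	printf("resource %s\n", demo_resource ? "still held" : "released");
	return 0;
}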
*/ driver = alloc_tty_driver(1); if (!driver) return -ENOMEM; - cleanup = !sclp_vt220_initialized; - rc = __sclp_vt220_init(); + rc = __sclp_vt220_init(MAX_KMEM_PAGES); if (rc) goto out_driver; @@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void) tty_set_operations(driver, &sclp_vt220_ops); rc = tty_register_driver(driver); - if (rc) { - printk(KERN_ERR SCLP_VT220_PRINT_HEADER - "could not register tty - " - "tty_register_driver returned %d\n", rc); + if (rc) goto out_init; - } sclp_vt220_driver = driver; return 0; out_init: - if (cleanup) - __sclp_vt220_cleanup(); + __sclp_vt220_cleanup(); out_driver: put_tty_driver(driver); return rc; @@ -773,10 +770,9 @@ sclp_vt220_con_init(void) { int rc; - INIT_LIST_HEAD(&sclp_vt220_register.list); if (!CONSOLE_IS_SCLP) return 0; - rc = __sclp_vt220_init(); + rc = __sclp_vt220_init(MAX_CONSOLE_PAGES); if (rc) return rc; /* Attach linux console */ diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 874adf365e4..22ca34361ed 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request) static int tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) { - if (irb->scsw.dstat == 0x85 /* READY */) { + if (irb->scsw.cmd.dstat == 0x85) { /* READY */ /* A medium was inserted in the drive. */ DBF_EVENT(6, "xuud med\n"); tape_34xx_delete_sbid_from(device, 0); @@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request, if (request == NULL) return tape_34xx_unsolicited_irq(device, irb); - if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && - (irb->scsw.dstat & DEV_STAT_DEV_END) && + if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && + (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { /* Write at end of volume */ PRINT_INFO("End of volume\n"); /* XXX */ return tape_34xx_erp_failed(request, -ENOSPC); } - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) return tape_34xx_unit_check(device, request, irb); - if (irb->scsw.dstat & DEV_STAT_DEV_END) { + if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { /* * A unit exception occurs on skipping over a tapemark block. */ - if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { if (request->op == TO_BSB || request->op == TO_FSB) request->rescnt++; else diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 42ce7915fc5..839987618ff 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, static int tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) { - if (irb->scsw.dstat == DEV_STAT_CHN_END) + if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) /* Probably result of halt ssch */ return TAPE_IO_PENDING; - else if (irb->scsw.dstat == 0x85) + else if (irb->scsw.cmd.dstat == 0x85) /* Device Ready */ DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); - else if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { tape_3590_schedule_work(device, TO_READ_ATTMSG); } else { DBF_EVENT(3, "unsol.irq! 
dev end: %08x\n", device->cdev_id); @@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, if (request == NULL) return tape_3590_unsolicited_irq(device, irb); - if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && - (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { + if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && + (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && + (request->op == TO_WRI)) { /* Write at end of volume */ DBF_EVENT(2, "End of volume\n"); return tape_3590_erp_failed(device, request, irb, -ENOSPC); } - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) return tape_3590_unit_check(device, request, irb); - if (irb->scsw.dstat & DEV_STAT_DEV_END) { - if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { + if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { + if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { if (request->op == TO_FSB || request->op == TO_BSB) request->rescnt++; else @@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, return tape_3590_done(device, request); } - if (irb->scsw.dstat & DEV_STAT_CHN_END) { + if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { DBF_EVENT(2, "cannel end\n"); return TAPE_IO_PENDING; } - if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { DBF_EVENT(2, "Unit Attention when busy..\n"); return TAPE_IO_PENDING; } diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c20e3c54834..181a5441af1 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request, PRINT_INFO("-------------------------------------------------\n"); PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", - irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa); + irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa); PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); if (request != NULL) PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); @@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, else op = "---"; DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", - irb->scsw.dstat,irb->scsw.cstat); + irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); sptr = (unsigned int *) irb->ecw; DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); @@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) * error might still apply. So we just schedule the request to be * started later. */ - if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && + if (irb->scsw.cmd.cc != 0 && + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && (request->status == TAPE_REQUEST_IN_IO)) { DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", - device->cdev_id, irb->scsw.cc, irb->scsw.fctl); + device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); request->status = TAPE_REQUEST_QUEUED; schedule_delayed_work(&device->tape_dnr, HZ); return; @@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* May be an unsolicited irq */ if(request != NULL) - request->rescnt = irb->scsw.count; - else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && + request->rescnt = irb->scsw.cmd.count; + else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && !list_empty(&device->req_queue)) { /* Not Ready to Ready after long busy ? 
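The long series of irb->scsw.dstat to irb->scsw.cmd.dstat conversions in the tape and tty drivers goes with the SCSW becoming a union of a command-mode and a transport-mode (FCX) word; the new scsw.o, fcx.o and itcw.o objects added to the cio Makefile further down belong to the same rework. The toy sketch below only illustrates the union-plus-accessor idea; its field layout is invented and much smaller than the real struct scsw.

#include <stdio.h>

/* Toy stand-in for the reworked union scsw: the same status word can
 * describe either a command-mode or a transport-mode I/O, so drivers go
 * through the .cmd member (or an accessor) instead of the old flat
 * fields.  The layout below is invented for the example. */
struct demo_cmd_scsw { unsigned int fctl, stctl, dstat, cstat; };
struct demo_tm_scsw  { unsigned int fctl, stctl, dstat, cstat; };

union demo_scsw {
	struct demo_cmd_scsw cmd;
	struct demo_tm_scsw tm;
};

/* Accessor in the spirit of the scsw_dstat()/scsw_cstat() helpers used by
 * the new chsc_sch.c code: callers stop caring which mode is active. */
static unsigned int demo_scsw_dstat(const union demo_scsw *scsw, int tm_mode)
{
	return tm_mode ? scsw->tm.dstat : scsw->cmd.dstat;
}

int main(void)
{
	union demo_scsw s = { .cmd = { .fctl = 1, .dstat = 0x0c } };

	/* 0x0c = channel end + device end, the "all fine" status checked
	 * by the dasd and tape interrupt handlers above */
	printf("dstat=%#04x\n", demo_scsw_dstat(&s, 0));
	return 0;
}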
*/ struct tape_request *req; @@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) return; } } - if (irb->scsw.dstat != 0x0c) { + if (irb->scsw.cmd.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) device->tape_generic_status |= GMT_ONLINE(~0); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 5043150019a..a7fe6302c98 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -663,7 +663,7 @@ static int tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) { /* Handle ATTN. Schedule tasklet to read aid. */ - if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { if (!tp->throttle) tty3270_issue_read(tp, 0); else @@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) } if (rq) { - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) rq->rc = -EIO; else /* Normal end. Copy residual count. */ - rq->rescnt = irb->scsw.count; + rq->rescnt = irb->scsw.cmd.count; } return RAW3270_IO_DONE; } @@ -1792,15 +1792,12 @@ static int __init tty3270_init(void) tty_set_operations(driver, &tty3270_ops); ret = tty_register_driver(driver); if (ret) { - printk(KERN_ERR "tty3270 registration failed with %d\n", ret); put_tty_driver(driver); return ret; } tty3270_driver = driver; ret = raw3270_register_notifier(tty3270_notifier); if (ret) { - printk(KERN_ERR "tty3270 notifier registration failed " - "with %d\n", ret); put_tty_driver(driver); return ret; diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 2f419b0ea62..401ea84b305 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -61,30 +61,24 @@ static int vmcp_release(struct inode *inode, struct file *file) static ssize_t vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { - size_t tocopy; + ssize_t ret; + size_t size; struct vmcp_session *session; - session = (struct vmcp_session *)file->private_data; + session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; if (!session->response) { mutex_unlock(&session->mutex); return 0; } - if (*ppos > session->resp_size) { - mutex_unlock(&session->mutex); - return 0; - } - tocopy = min(session->resp_size - (size_t) (*ppos), count); - tocopy = min(tocopy, session->bufsize - (size_t) (*ppos)); + size = min_t(size_t, session->resp_size, session->bufsize); + ret = simple_read_from_buffer(buff, count, ppos, + session->response, size); - if (copy_to_user(buff, session->response + (*ppos), tocopy)) { - mutex_unlock(&session->mutex); - return -EFAULT; - } mutex_unlock(&session->mutex); - *ppos += tocopy; - return tocopy; + + return ret; } static ssize_t @@ -198,27 +192,23 @@ static int __init vmcp_init(void) PRINT_WARN("z/VM CP interface is only available under z/VM\n"); return -ENODEV; } + vmcp_debug = debug_register("vmcp", 1, 1, 240); - if (!vmcp_debug) { - PRINT_ERR("z/VM CP interface not loaded. Could not register " - "debug feature\n"); + if (!vmcp_debug) return -ENOMEM; - } + ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); if (ret) { - PRINT_ERR("z/VM CP interface not loaded. Could not register " - "debug feature view. Error code: %d\n", ret); debug_unregister(vmcp_debug); return ret; } + ret = misc_register(&vmcp_dev); if (ret) { - PRINT_ERR("z/VM CP interface not loaded. 
Could not register " - "misc device. Error code: %d\n", ret); debug_unregister(vmcp_debug); return ret; } - PRINT_INFO("z/VM CP interface loaded\n"); + return 0; } @@ -226,7 +216,6 @@ static void __exit vmcp_exit(void) { misc_deregister(&vmcp_dev); debug_unregister(vmcp_debug); - PRINT_INFO("z/VM CP interface unloaded.\n"); } module_init(vmcp_init); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 2c2428cc05d..a246bc73ae6 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -216,9 +216,7 @@ static int vmlogrdr_get_recording_class_AB(void) char *tail; int len,i; - printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); - printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response); len = strnlen(cp_response,sizeof(cp_response)); // now the parsing tail=strnchr(cp_response,len,'='); @@ -268,11 +266,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, logptr->recording_name, qid_string); - printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", - cp_command); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); - printk (KERN_DEBUG "vmlogrdr: recording response: %s", - cp_response); } memset(cp_command, 0x00, sizeof(cp_command)); @@ -282,10 +276,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, onoff, qid_string); - printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); - printk (KERN_DEBUG "vmlogrdr: recording response: %s", - cp_response); /* The recording command will usually answer with 'Command complete' * on success, but when the specific service was never connected * before then there might be an additional informational message @@ -567,10 +558,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, "RECORDING %s PURGE ", priv->recording_name); - printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); - printk (KERN_DEBUG "vmlogrdr: recording response: %s", - cp_response); return count; } @@ -682,28 +670,20 @@ static int vmlogrdr_register_driver(void) /* Register with iucv driver */ ret = iucv_register(&vmlogrdr_iucv_handler, 1); - if (ret) { - printk (KERN_ERR "vmlogrdr: failed to register with " - "iucv driver\n"); + if (ret) goto out; - } ret = driver_register(&vmlogrdr_driver); - if (ret) { - printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); + if (ret) goto out_iucv; - } ret = driver_create_file(&vmlogrdr_driver, &driver_attr_recording_status); - if (ret) { - printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); + if (ret) goto out_driver; - } vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); if (IS_ERR(vmlogrdr_class)) { - printk(KERN_ERR "vmlogrdr: failed to create class.\n"); ret = PTR_ERR(vmlogrdr_class); vmlogrdr_class = NULL; goto out_attr; @@ -871,12 +851,10 @@ static int __init vmlogrdr_init(void) rc = vmlogrdr_register_cdev(dev); if (rc) goto cleanup; - printk (KERN_INFO "vmlogrdr: driver loaded\n"); return 0; cleanup: vmlogrdr_cleanup(); - printk (KERN_ERR "vmlogrdr: driver not loaded.\n"); return rc; } @@ -884,7 +862,6 @@ cleanup: static void __exit vmlogrdr_exit(void) { vmlogrdr_cleanup(); - printk (KERN_INFO "vmlogrdr: driver unloaded\n"); return; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 83ae9a852f0..49cba9effe8 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -277,7 +277,8 @@ static 
void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, struct urdev *urd; TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", - intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); + intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, + irb->scsw.cmd.count); if (!intparm) { TRACE("ur_int_handler: unsolicited interrupt\n"); @@ -288,7 +289,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, /* On special conditions irb is an error pointer */ if (IS_ERR(irb)) urd->io_request_rc = PTR_ERR(irb); - else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) urd->io_request_rc = 0; else urd->io_request_rc = -EIO; diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 19f8389291b..56b3eab019c 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -92,23 +92,15 @@ static int vmwdt_keepalive(void) func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; ret = __diag288(func, vmwdt_interval, ebc_cmd, len); + WARN_ON(ret != 0); kfree(ebc_cmd); - - if (ret) { - printk(KERN_WARNING "%s: problem setting interval %d, " - "cmd %s\n", __func__, vmwdt_interval, - vmwdt_cmd); - } return ret; } static int vmwdt_disable(void) { int ret = __diag288(wdt_cancel, 0, "", 0); - if (ret) { - printk(KERN_WARNING "%s: problem disabling watchdog\n", - __func__); - } + WARN_ON(ret != 0); return ret; } @@ -121,10 +113,8 @@ static int __init vmwdt_probe(void) static char __initdata ebc_begin[] = { 194, 197, 199, 201, 213 }; - if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) { - printk(KERN_INFO "z/VM watchdog not available\n"); + if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) return -EINVAL; - } return vmwdt_disable(); } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bbbd14e9d48..047dd92ae80 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch) /* get info for boot cpu from lowcore, stored in the HSA */ sa = kmalloc(sizeof(*sa), GFP_KERNEL); - if (!sa) { - ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__); + if (!sa) return -ENOMEM; - } if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { - ERROR_MSG("could not copy from HSA\n"); + TRACE("could not copy from HSA\n"); kfree(sa); return -EIO; } @@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map) */ static int __init sys_info_init(enum arch_id arch) { + int rc; + switch (arch) { case ARCH_S390X: MSG("DETECTED 'S390X (64 bit) OS'\n"); @@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch) return -EINVAL; } sys_info.arch = arch; - if (init_cpu_info(arch)) { - ERROR_MSG("get cpu info failed\n"); - return -ENOMEM; - } + rc = init_cpu_info(arch); + if (rc) + return rc; sys_info.mem_size = real_memory_size; return 0; @@ -544,12 +543,12 @@ static int __init check_sdias(void) rc = sclp_sdias_blk_count(); if (rc < 0) { - ERROR_MSG("Could not determine HSA size\n"); + TRACE("Could not determine HSA size\n"); return rc; } act_hsa_size = (rc - 1) * PAGE_SIZE; if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - ERROR_MSG("HSA size too small: %i\n", act_hsa_size); + TRACE("HSA size too small: %i\n", act_hsa_size); return -EINVAL; } return 0; @@ -590,16 +589,12 @@ static int __init zcore_init(void) goto fail; rc = check_sdias(); - if (rc) { - ERROR_MSG("Dump initialization failed\n"); + 
if (rc) goto fail; - } rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); - if (rc) { - ERROR_MSG("sdial memcpy for arch id failed\n"); + if (rc) goto fail; - } #ifndef __s390x__ if (arch == ARCH_S390X) { @@ -610,10 +605,8 @@ static int __init zcore_init(void) #endif rc = sys_info_init(arch); - if (rc) { - ERROR_MSG("arch init failed\n"); + if (rc) goto fail; - } zcore_header_init(arch, &zcore_header); diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index cfaf77b320f..91e9e3f3073 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -2,9 +2,11 @@ # Makefile for the S/390 common i/o drivers # -obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o +obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ + fcx.o itcw.o ccw_device-objs += device.o device_fsm.o device_ops.o ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o +obj-$(CONFIG_CHSC_SCH) += chsc_sch.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o obj-$(CONFIG_QDIO) += qdio.o diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index b7a07a86629..fe6cea15bba 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -15,6 +15,7 @@ #include <linux/rcupdate.h> #include <asm/airq.h> +#include <asm/isc.h> #include "cio.h" #include "cio_debug.h" @@ -33,15 +34,15 @@ struct airq_t { void *drv_data; }; -static union indicator_t indicators; -static struct airq_t *airqs[NR_AIRQS]; +static union indicator_t indicators[MAX_ISC]; +static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; -static int register_airq(struct airq_t *airq) +static int register_airq(struct airq_t *airq, u8 isc) { int i; for (i = 0; i < NR_AIRQS; i++) - if (!cmpxchg(&airqs[i], NULL, airq)) + if (!cmpxchg(&airqs[isc][i], NULL, airq)) return i; return -ENOMEM; } @@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq) * s390_register_adapter_interrupt() - register adapter interrupt handler * @handler: adapter handler to be registered * @drv_data: driver data passed with each call to the handler + * @isc: isc for which the handler should be called * * Returns: * Pointer to the indicator to be used on success * ERR_PTR() if registration failed */ void *s390_register_adapter_interrupt(adapter_int_handler_t handler, - void *drv_data) + void *drv_data, u8 isc) { struct airq_t *airq; char dbf_txt[16]; int ret; + if (isc > MAX_ISC) + return ERR_PTR(-EINVAL); airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); if (!airq) { ret = -ENOMEM; @@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler, } airq->handler = handler; airq->drv_data = drv_data; - ret = register_airq(airq); - if (ret < 0) - kfree(airq); + + ret = register_airq(airq, isc); out: snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); CIO_TRACE_EVENT(4, dbf_txt); - if (ret < 0) + if (ret < 0) { + kfree(airq); return ERR_PTR(ret); - else - return &indicators.byte[ret]; + } else + return &indicators[isc].byte[ret]; } EXPORT_SYMBOL(s390_register_adapter_interrupt); /** * s390_unregister_adapter_interrupt - unregister adapter interrupt handler * @ind: indicator for which the handler is to be unregistered + * @isc: interruption subclass */ -void s390_unregister_adapter_interrupt(void *ind) +void s390_unregister_adapter_interrupt(void *ind, u8 isc) { struct airq_t *airq; char dbf_txt[16]; int i; - i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]); + i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]); snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); 
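register_airq() above claims a free entry in the now per-ISC airqs[][] table with cmpxchg(), so the registration path needs no lock. The sketch below shows the same claim-a-NULL-slot idea in plain userspace C, using C11 atomics as a stand-in for the kernel's cmpxchg() macro.

#include <stdatomic.h>
#include <stdio.h>

#define NR_SLOTS 32

/* The real driver keeps one such table per interruption subclass; a
 * single table is enough to show the lock-free claim. */
static _Atomic(void *) slots[NR_SLOTS];

/* Returns the claimed index, or -1 if the table is full.  Each slot is
 * claimed by atomically swapping NULL -> handler, so two concurrent
 * registrations can never grab the same entry. */
static int claim_slot(void *handler)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&slots[i], &expected, handler))
			return i;
	}
	return -1;
}

int main(void)
{
	int a = claim_slot((void *)0x1);
	int b = claim_slot((void *)0x2);

	printf("claimed slots %d and %d\n", a, b);	/* prints 0 and 1 */
	return 0;
}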
CIO_TRACE_EVENT(4, dbf_txt); - indicators.byte[i] = 0; - airq = xchg(&airqs[i], NULL); + indicators[isc].byte[i] = 0; + airq = xchg(&airqs[isc][i], NULL); /* * Allow interrupts to complete. This will ensure that the airq handle * is no longer referenced by any interrupt handler. @@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt); #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) -void do_adapter_IO(void) +void do_adapter_IO(u8 isc) { int w; int i; @@ -120,22 +125,22 @@ void do_adapter_IO(void) * fetch operations. */ for (w = 0; w < NR_AIRQ_WORDS; w++) { - word = indicators.word[w]; + word = indicators[isc].word[w]; i = w * NR_AIRQS_PER_WORD; /* * Check bytes within word for active indicators. */ while (word) { if (word & INDICATOR_MASK) { - airq = airqs[i]; + airq = airqs[isc][i]; if (likely(airq)) - airq->handler(&indicators.byte[i], + airq->handler(&indicators[isc].byte[i], airq->drv_data); else /* * Reset ill-behaved indicator. */ - indicators.byte[i] = 0; + indicators[isc].byte[i] = 0; } word <<= 8; i++; diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 297cdceb0ca..db00b059173 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -18,6 +18,7 @@ #include <asm/chpid.h> #include <asm/sclp.h> +#include "../s390mach.h" #include "cio.h" #include "css.h" #include "ioasm.h" @@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch) } return opm; } +EXPORT_SYMBOL_GPL(chp_get_sch_opm); /** * chp_is_registered - check if a channel-path is registered @@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on) CIO_TRACE_EVENT(2, dbf_text); status = chp_get_status(chpid); - if (!on && !status) { - printk(KERN_ERR "cio: chpid %x.%02x is already offline\n", - chpid.cssid, chpid.id); - return -EINVAL; - } + if (!on && !status) + return 0; set_chp_logically_online(chpid, on); chsc_chp_vary(chpid, on); @@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj, { struct channel_path *chp; struct device *device; - unsigned int size; device = container_of(kobj, struct device, kobj); chp = to_channelpath(device); if (!chp->cmg_chars) return 0; - size = sizeof(struct cmg_chars); - - if (off > size) - return 0; - if (off + count > size) - count = size - off; - memcpy(buf, chp->cmg_chars + off, count); - return count; + return memory_read_from_buffer(buf, count, &off, + chp->cmg_chars, sizeof(struct cmg_chars)); } static struct bin_attribute chp_measurement_chars_attr = { @@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid) chpid.id); /* Obtain channel path description and fill it in. */ - ret = chsc_determine_channel_path_description(chpid, &chp->desc); + ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); if (ret) goto out_free; if ((chp->desc.flags & 0x80) == 0) { @@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid) goto out_free; } /* Get channel-measurement characteristics. 
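Both vmcp_read() earlier in this patch and chp_measurement_chars_read() above drop their hand-rolled offset and length checks in favour of the generic helpers: simple_read_from_buffer() for file read() methods and memory_read_from_buffer() for sysfs bin_attribute readers. The userspace model below mirrors the clamping semantics those helpers provide; the real simple_read_from_buffer() additionally copies to user space and can return -EFAULT.

#include <stdio.h>
#include <string.h>

/* Userspace model of simple_read_from_buffer()/memory_read_from_buffer():
 * clamp the file position against the available data, copy at most count
 * bytes, advance the position, return the number of bytes copied. */
static long read_from_buffer(void *to, size_t count, long *ppos,
			     const void *from, size_t available)
{
	long pos = *ppos;

	if (pos < 0)
		return -1;		/* -EINVAL in the kernel helpers */
	if ((size_t)pos >= available || !count)
		return 0;		/* end of file */
	if (count > available - pos)
		count = available - pos;
	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;
	return count;
}

int main(void)
{
	const char resp[] = "CP response text";
	char out[8];
	long pos = 0, n;

	while ((n = read_from_buffer(out, sizeof(out), &pos,
				     resp, sizeof(resp) - 1)) > 0)
		printf("read %ld bytes: %.*s\n", n, (int)n, out);
	return 0;
}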
*/ - if (css_characteristics_avail && css_chsc_characteristics.scmc - && css_chsc_characteristics.secm) { + if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) { ret = chsc_get_channel_measurement_chars(chp); if (ret) goto out_free; @@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid) /** * chp_process_crw - process channel-path status change - * @id: channel-path ID number - * @status: non-zero if channel-path has become available, zero otherwise + * @crw0: channel report-word to handler + * @crw1: second channel-report word (always NULL) + * @overflow: crw overflow indication * * Handle channel-report-words indicating that the status of a channel-path * has changed. */ -void chp_process_crw(int id, int status) +static void chp_process_crw(struct crw *crw0, struct crw *crw1, + int overflow) { struct chp_id chpid; + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); + /* + * Check for solicited machine checks. These are + * created by reset channel path and need not be + * handled here. + */ + if (crw0->slct) { + CIO_CRW_EVENT(2, "solicited machine check for " + "channel path %02X\n", crw0->rsid); + return; + } chp_id_init(&chpid); - chpid.id = id; - if (status) { + chpid.id = crw0->rsid; + switch (crw0->erc) { + case CRW_ERC_IPARM: /* Path has come. */ if (!chp_is_registered(chpid)) chp_new(chpid); chsc_chp_online(chpid); - } else + break; + case CRW_ERC_PERRI: /* Path has gone. */ + case CRW_ERC_PERRN: chsc_chp_offline(chpid); + break; + default: + CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n", + crw0->erc); + } } +int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link) +{ + int i; + int mask; + + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (!(ssd->path_mask & mask)) + continue; + if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid)) + continue; + if ((ssd->fla_valid_mask & mask) && + ((ssd->fla[i] & link->fla_mask) != link->fla)) + continue; + return mask; + } + return 0; +} +EXPORT_SYMBOL_GPL(chp_ssd_get_mask); + static inline int info_bit_num(struct chp_id id) { return id.id + id.cssid * (__MAX_CHPID + 1); @@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work) { struct chp_id chpid; enum cfg_task_t t; + int rc; mutex_lock(&cfg_lock); t = cfg_none; @@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work) switch (t) { case cfg_configure: - sclp_chp_configure(chpid); - info_expire(); - chsc_chp_online(chpid); + rc = sclp_chp_configure(chpid); + if (rc) + CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)=" + "%d\n", chpid.cssid, chpid.id, rc); + else { + info_expire(); + chsc_chp_online(chpid); + } break; case cfg_deconfigure: - sclp_chp_deconfigure(chpid); - info_expire(); - chsc_chp_offline(chpid); + rc = sclp_chp_deconfigure(chpid); + if (rc) + CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)=" + "%d\n", chpid.cssid, chpid.id, rc); + else { + info_expire(); + chsc_chp_offline(chpid); + } break; case cfg_none: /* Get updated information after last change. 
*/ @@ -654,10 +704,16 @@ static int cfg_wait_idle(void) static int __init chp_init(void) { struct chp_id chpid; + int ret; + ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); + if (ret) + return ret; chp_wq = create_singlethread_workqueue("cio_chp"); - if (!chp_wq) + if (!chp_wq) { + s390_unregister_crw_handler(CRW_RSC_CPATH); return -ENOMEM; + } INIT_WORK(&cfg_work, cfg_func); init_waitqueue_head(&cfg_wait_queue); if (info_update()) diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 65286563c59..26c3d224617 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -12,12 +12,24 @@ #include <linux/device.h> #include <asm/chpid.h> #include "chsc.h" +#include "css.h" #define CHP_STATUS_STANDBY 0 #define CHP_STATUS_CONFIGURED 1 #define CHP_STATUS_RESERVED 2 #define CHP_STATUS_NOT_RECOGNIZED 3 +#define CHP_ONLINE 0 +#define CHP_OFFLINE 1 +#define CHP_VARY_ON 2 +#define CHP_VARY_OFF 3 + +struct chp_link { + struct chp_id chpid; + u32 fla_mask; + u16 fla; +}; + static inline int chp_test_bit(u8 *bitmap, int num) { int byte = num >> 3; @@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid); u8 chp_get_sch_opm(struct subchannel *sch); int chp_is_registered(struct chp_id chpid); void *chp_get_chp_desc(struct chp_id chpid); -void chp_process_crw(int id, int available); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); int chp_new(struct chp_id chpid); void chp_cfg_schedule(struct chp_id chpid, int configure); void chp_cfg_cancel_deconfigure(struct chp_id chpid); int chp_info_get_status(struct chp_id chpid); - +int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *); #endif /* S390_CHP_H */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 5de86908b0d..65264a38057 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -2,8 +2,7 @@ * drivers/s390/cio/chsc.c * S/390 common I/O routines -- channel subsystem call * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 1999,2008 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -16,7 +15,9 @@ #include <asm/cio.h> #include <asm/chpid.h> +#include <asm/chsc.h> +#include "../s390mach.h" #include "css.h" #include "cio.h" #include "cio_debug.h" @@ -127,77 +128,12 @@ out_free: return ret; } -static int check_for_io_on_path(struct subchannel *sch, int mask) -{ - int cc; - - cc = stsch(sch->schid, &sch->schib); - if (cc) - return 0; - if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) - return 1; - return 0; -} - -static void terminate_internal_io(struct subchannel *sch) -{ - if (cio_clear(sch)) { - /* Recheck device in case clear failed. */ - sch->lpm = 0; - if (device_trigger_verify(sch) != 0) - css_schedule_eval(sch->schid); - return; - } - /* Request retry of internal operation. */ - device_set_intretry(sch); - /* Call handler. 
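chp_init() above (and chsc_alloc_sei_area() further down) now registers its channel-report handler with s390_register_crw_handler() for one reporting-source code instead of being called by name from the machine-check code. The sketch below shows the general shape of such a dispatch table keyed by reporting-source code; the table size, types and the value 4 are invented for the example, only the register/dispatch structure follows the patch.

#include <stdio.h>

#define NR_RSCS 16	/* made-up number of reporting-source codes */

struct crw_demo { unsigned int rsc; unsigned int rsid; };

typedef void (*crw_handler_t)(struct crw_demo *crw0, struct crw_demo *crw1,
			      int overflow);

static crw_handler_t crw_handlers[NR_RSCS];

/* Subsystems (channel paths, css, ...) hook in their handler for one
 * reporting-source code; -1 mimics refusing a second registration. */
static int register_crw_handler(unsigned int rsc, crw_handler_t handler)
{
	if (rsc >= NR_RSCS || crw_handlers[rsc])
		return -1;
	crw_handlers[rsc] = handler;
	return 0;
}

/* The machine-check path then just routes each CRW by its rsc field. */
static void dispatch_crw(struct crw_demo *crw)
{
	if (crw->rsc < NR_RSCS && crw_handlers[crw->rsc])
		crw_handlers[crw->rsc](crw, NULL, 0);
}

static void chp_handler(struct crw_demo *crw0, struct crw_demo *crw1, int overflow)
{
	printf("channel-path event for rsid %02x\n", crw0->rsid);
}

int main(void)
{
	struct crw_demo crw = { .rsc = 4, .rsid = 0x50 };

	register_crw_handler(4, chp_handler);	/* 4 stands in for CRW_RSC_CPATH */
	dispatch_crw(&crw);
	return 0;
}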
*/ - if (sch->driver && sch->driver->termination) - sch->driver->termination(sch); -} - static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) { - int j; - int mask; - struct chp_id *chpid = data; - struct schib schib; - - for (j = 0; j < 8; j++) { - mask = 0x80 >> j; - if ((sch->schib.pmcw.pim & mask) && - (sch->schib.pmcw.chpid[j] == chpid->id)) - break; - } - if (j >= 8) - return 0; - spin_lock_irq(sch->lock); - - stsch(sch->schid, &schib); - if (!css_sch_is_valid(&schib)) - goto out_unreg; - memcpy(&sch->schib, &schib, sizeof(struct schib)); - /* Check for single path devices. */ - if (sch->schib.pmcw.pim == 0x80) - goto out_unreg; - - if (check_for_io_on_path(sch, mask)) { - if (device_is_online(sch)) - device_kill_io(sch); - else { - terminate_internal_io(sch); - /* Re-start path verification. */ - if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - } - } else { - /* trigger path verification. */ - if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - else if (sch->lpm == mask) + if (sch->driver && sch->driver->chp_event) + if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) goto out_unreg; - } - spin_unlock_irq(sch->lock); return 0; @@ -211,15 +147,18 @@ out_unreg: void chsc_chp_offline(struct chp_id chpid) { char dbf_txt[15]; + struct chp_link link; sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); if (chp_get_status(chpid) <= 0) return; + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; /* Wait until previous actions have settled. */ css_wait_for_slow_path(); - for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); + for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); } static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) @@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) return 0; } -struct res_acc_data { - struct chp_id chpid; - u32 fla_mask; - u16 fla; -}; - -static int get_res_chpid_mask(struct chsc_ssd_info *ssd, - struct res_acc_data *data) -{ - int i; - int mask; - - for (i = 0; i < 8; i++) { - mask = 0x80 >> i; - if (!(ssd->path_mask & mask)) - continue; - if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) - continue; - if ((ssd->fla_valid_mask & mask) && - ((ssd->fla[i] & data->fla_mask) != data->fla)) - continue; - return mask; - } - return 0; -} - static int __s390_process_res_acc(struct subchannel *sch, void *data) { - int chp_mask, old_lpm; - struct res_acc_data *res_data = data; - spin_lock_irq(sch->lock); - chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); - if (chp_mask == 0) - goto out; - if (stsch(sch->schid, &sch->schib)) - goto out; - old_lpm = sch->lpm; - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | chp_mask) & sch->opm; - if (!old_lpm && sch->lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); -out: + if (sch->driver && sch->driver->chp_event) + sch->driver->chp_event(sch, data, CHP_ONLINE); spin_unlock_irq(sch->lock); return 0; } -static void s390_process_res_acc (struct res_acc_data *res_data) +static void s390_process_res_acc(struct chp_link *link) { char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, - res_data->chpid.id); + sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, + link->chpid.id); CIO_TRACE_EVENT( 2, dbf_txt); - if (res_data->fla != 0) { - sprintf(dbf_txt, "fla%x", res_data->fla); + if 
(link->fla != 0) { + sprintf(dbf_txt, "fla%x", link->fla); CIO_TRACE_EVENT( 2, dbf_txt); } /* Wait until previous actions have settled. */ @@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data) * will we have to do. */ for_each_subchannel_staged(__s390_process_res_acc, - s390_process_res_acc_new_sch, res_data); + s390_process_res_acc_new_sch, link); } static int @@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) { - struct res_acc_data res_data; + struct chp_link link; struct chp_id chpid; int status; @@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) chp_new(chpid); else if (!status) return; - memset(&res_data, 0, sizeof(struct res_acc_data)); - res_data.chpid = chpid; + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { - res_data.fla = sei_area->fla; + link.fla = sei_area->fla; if ((sei_area->vf & 0xc0) == 0xc0) /* full link address */ - res_data.fla_mask = 0xffff; + link.fla_mask = 0xffff; else /* link address */ - res_data.fla_mask = 0xff00; + link.fla_mask = 0xff00; } - s390_process_res_acc(&res_data); + s390_process_res_acc(&link); } struct chp_config_data { @@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area) } } -void chsc_process_crw(void) +static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct chsc_sei_area *sei_area; + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); if (!sei_page) return; /* Access to sei_page is serialized through machine check handler * thread, so no need for locking. */ sei_area = sei_page; - CIO_TRACE_EVENT( 2, "prcss"); + CIO_TRACE_EVENT(2, "prcss"); do { memset(sei_area, 0, sizeof(*sei_area)); sei_area->request.length = 0x0010; @@ -509,114 +414,36 @@ void chsc_process_crw(void) } while (sei_area->flags & 0x80); } -static int __chp_add_new_sch(struct subchannel_id schid, void *data) -{ - struct schib schib; - - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - - -static int __chp_add(struct subchannel *sch, void *data) -{ - int i, mask; - struct chp_id *chpid = data; - - spin_lock_irq(sch->lock); - for (i=0; i<8; i++) { - mask = 0x80 >> i; - if ((sch->schib.pmcw.pim & mask) && - (sch->schib.pmcw.chpid[i] == chpid->id)) - break; - } - if (i==8) { - spin_unlock_irq(sch->lock); - return 0; - } - if (stsch(sch->schid, &sch->schib)) { - spin_unlock_irq(sch->lock); - css_schedule_eval(sch->schid); - return 0; - } - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | mask) & sch->opm; - - if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - - spin_unlock_irq(sch->lock); - - return 0; -} - void chsc_chp_online(struct chp_id chpid) { char dbf_txt[15]; + struct chp_link link; sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); if (chp_get_status(chpid) != 0) { + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; /* Wait until previous actions have settled. 
*/ css_wait_for_slow_path(); - for_each_subchannel_staged(__chp_add, __chp_add_new_sch, - &chpid); + for_each_subchannel_staged(__s390_process_res_acc, NULL, + &link); } } static void __s390_subchannel_vary_chpid(struct subchannel *sch, struct chp_id chpid, int on) { - int chp, old_lpm; - int mask; unsigned long flags; + struct chp_link link; + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; spin_lock_irqsave(sch->lock, flags); - old_lpm = sch->lpm; - for (chp = 0; chp < 8; chp++) { - mask = 0x80 >> chp; - if (!(sch->ssd_info.path_mask & mask)) - continue; - if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) - continue; - - if (on) { - sch->opm |= mask; - sch->lpm |= mask; - if (!old_lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - break; - } - sch->opm &= ~mask; - sch->lpm &= ~mask; - if (check_for_io_on_path(sch, mask)) { - if (device_is_online(sch)) - /* Path verification is done after killing. */ - device_kill_io(sch); - else { - /* Kill and retry internal I/O. */ - terminate_internal_io(sch); - /* Re-start path verification. */ - if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - } - } else if (!sch->lpm) { - if (device_trigger_verify(sch) != 0) - css_schedule_eval(sch->schid); - } else if (sch->driver && sch->driver->verify) - sch->driver->verify(sch); - break; - } + if (sch->driver && sch->driver->chp_event) + sch->driver->chp_event(sch, &link, + on ? CHP_VARY_ON : CHP_VARY_OFF); spin_unlock_irqrestore(sch->lock, flags); } @@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) */ int chsc_chp_vary(struct chp_id chpid, int on) { + struct chp_link link; + + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; /* Wait until previous actions have settled. 
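With the hunks above, the chsc code no longer walks pmcw and ssd masks itself; it packs the chpid (plus optional link address) into a struct chp_link and hands CHP_OFFLINE, CHP_ONLINE, CHP_VARY_ON or CHP_VARY_OFF to the subchannel driver's new chp_event() callback. The sketch below is a hypothetical driver-side handler written as if it lived next to the cio code: the event codes, the chp_ssd_get_mask() helper and the non-zero-return-means-unregister convention on CHP_OFFLINE come from the patch, while demo_drop_path() and demo_add_path() are placeholders.

#include "cio.h"
#include "chp.h"

static int demo_drop_path(struct subchannel *sch, int mask)
{
	/* a real driver would terminate I/O running on the path and
	 * re-trigger path verification here */
	return 0;	/* non-zero would ask the caller to unregister sch */
}

static void demo_add_path(struct subchannel *sch, int mask)
{
	/* a real driver would add the path to its lpm/opm and reprobe */
}

/* Hypothetical chp_event() implementation for a subchannel driver. */
static int demo_chp_event(struct subchannel *sch, void *data, int event)
{
	struct chp_link *link = data;
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);

	if (!mask)
		return 0;	/* this subchannel does not use the path */
	switch (event) {
	case CHP_OFFLINE:
	case CHP_VARY_OFF:
		return demo_drop_path(sch, mask);
	case CHP_ONLINE:
	case CHP_VARY_ON:
		demo_add_path(sch, mask);
		break;
	}
	return 0;
}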
*/ css_wait_for_slow_path(); /* @@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on) if (on) for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &chpid); + __s390_vary_chpid_on, &link); else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, - NULL, &chpid); + NULL, &link); return 0; } @@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable) return ret; } -int chsc_determine_channel_path_description(struct chp_id chpid, - struct channel_path_desc *desc) +int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, + int c, int m, + struct chsc_response_struct *resp) { int ccode, ret; struct { struct chsc_header request; - u32 : 24; + u32 : 2; + u32 m : 1; + u32 c : 1; + u32 fmt : 4; + u32 cssid : 8; + u32 : 4; + u32 rfmt : 4; u32 first_chpid : 8; u32 : 24; u32 last_chpid : 8; u32 zeroes1; struct chsc_header response; - u32 zeroes2; - struct channel_path_desc desc; + u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scpd_area; + if ((rfmt == 1) && !css_general_characteristics.fcs) + return -EINVAL; + if ((rfmt == 2) && !css_general_characteristics.cib) + return -EINVAL; scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scpd_area) return -ENOMEM; @@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid, scpd_area->request.length = 0x0010; scpd_area->request.code = 0x0002; + scpd_area->cssid = chpid.cssid; scpd_area->first_chpid = chpid.id; scpd_area->last_chpid = chpid.id; + scpd_area->m = m; + scpd_area->c = c; + scpd_area->fmt = fmt; + scpd_area->rfmt = rfmt; ccode = chsc(scpd_area); if (ccode > 0) { @@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid, ret = chsc_error_from_response(scpd_area->response.code); if (ret == 0) /* Success. */ - memcpy(desc, &scpd_area->desc, - sizeof(struct channel_path_desc)); + memcpy(resp, &scpd_area->response, scpd_area->response.length); else CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", scpd_area->response.code); @@ -842,6 +687,25 @@ out: free_page((unsigned long)scpd_area); return ret; } +EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); + +int chsc_determine_base_channel_path_desc(struct chp_id chpid, + struct channel_path_desc *desc) +{ + struct chsc_response_struct *chsc_resp; + int ret; + + chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); + if (!chsc_resp) + return -ENOMEM; + ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); + if (ret) + goto out_free; + memcpy(desc, &chsc_resp->data, chsc_resp->length); +out_free: + kfree(chsc_resp); + return ret; +} static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, @@ -937,15 +801,23 @@ out: int __init chsc_alloc_sei_area(void) { + int ret; + sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sei_page) + if (!sei_page) { CIO_MSG_EVENT(0, "Can't allocate page for processing of " "chsc machine checks!\n"); - return (sei_page ? 
0 : -ENOMEM); + return -ENOMEM; + } + ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); + if (ret) + kfree(sei_page); + return ret; } void __init chsc_free_sei_area(void) { + s390_unregister_crw_handler(CRW_RSC_CSS); kfree(sei_page); } @@ -1043,3 +915,52 @@ exit: EXPORT_SYMBOL_GPL(css_general_characteristics); EXPORT_SYMBOL_GPL(css_chsc_characteristics); + +int chsc_sstpc(void *page, unsigned int op, u16 ctrl) +{ + struct { + struct chsc_header request; + unsigned int rsvd0; + unsigned int op : 8; + unsigned int rsvd1 : 8; + unsigned int ctrl : 16; + unsigned int rsvd2[5]; + struct chsc_header response; + unsigned int rsvd3[7]; + } __attribute__ ((packed)) *rr; + int rc; + + memset(page, 0, PAGE_SIZE); + rr = page; + rr->request.length = 0x0020; + rr->request.code = 0x0033; + rr->op = op; + rr->ctrl = ctrl; + rc = chsc(rr); + if (rc) + return -EIO; + rc = (rr->response.code == 0x0001) ? 0 : -EIO; + return rc; +} + +int chsc_sstpi(void *page, void *result, size_t size) +{ + struct { + struct chsc_header request; + unsigned int rsvd0[3]; + struct chsc_header response; + char data[size]; + } __attribute__ ((packed)) *rr; + int rc; + + memset(page, 0, PAGE_SIZE); + rr = page; + rr->request.length = 0x0010; + rr->request.code = 0x0038; + rc = chsc(rr); + if (rc) + return -EIO; + memcpy(result, &rr->data, size); + return (rr->response.code == 0x0001) ? 0 : -EIO; +} + diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index d1f5db1e69b..fb6c4d6c45b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -4,7 +4,8 @@ #include <linux/types.h> #include <linux/device.h> #include <asm/chpid.h> -#include "schid.h" +#include <asm/chsc.h> +#include <asm/schid.h> #define CHSC_SDA_OC_MSS 0x2 @@ -36,14 +37,15 @@ struct channel_path_desc { struct channel_path; -extern void chsc_process_crw(void); - struct css_general_char { - u64 : 41; + u64 : 12; + u32 dynio : 1; /* bit 12 */ + u32 : 28; u32 aif : 1; /* bit 41 */ u32 : 3; u32 mcss : 1; /* bit 45 */ - u32 : 2; + u32 fcs : 1; /* bit 46 */ + u32 : 1; u32 ext_mb : 1; /* bit 48 */ u32 : 7; u32 aif_tdd : 1; /* bit 56 */ @@ -51,7 +53,11 @@ struct css_general_char { u32 qebsm : 1; /* bit 58 */ u32 : 8; u32 aif_osa : 1; /* bit 67 */ - u32 : 28; + u32 : 14; + u32 cib : 1; /* bit 82 */ + u32 : 5; + u32 fcx : 1; /* bit 88 */ + u32 : 7; }__attribute__((packed)); struct css_chsc_char { @@ -78,7 +84,6 @@ struct chsc_ssd_info { extern int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd); extern int chsc_determine_css_characteristics(void); -extern int css_characteristics_avail; extern int chsc_alloc_sei_area(void); extern void chsc_free_sei_area(void); @@ -87,8 +92,11 @@ struct channel_subsystem; extern int chsc_secm(struct channel_subsystem *, int); int chsc_chp_vary(struct chp_id chpid, int on); -int chsc_determine_channel_path_description(struct chp_id chpid, - struct channel_path_desc *desc); +int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, + int c, int m, + struct chsc_response_struct *resp); +int chsc_determine_base_channel_path_desc(struct chp_id chpid, + struct channel_path_desc *desc); void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c new file mode 100644 index 00000000000..91ca87aa9f9 --- /dev/null +++ b/drivers/s390/cio/chsc_sch.c @@ -0,0 +1,820 @@ +/* + * Driver for s390 chsc subchannels + * + * 
Copyright IBM Corp. 2008 + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> + +#include <asm/cio.h> +#include <asm/chsc.h> +#include <asm/isc.h> + +#include "cio.h" +#include "cio_debug.h" +#include "css.h" +#include "chsc_sch.h" +#include "ioasm.h" + +static debug_info_t *chsc_debug_msg_id; +static debug_info_t *chsc_debug_log_id; + +#define CHSC_MSG(imp, args...) do { \ + debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ + } while (0) + +#define CHSC_LOG(imp, txt) do { \ + debug_text_event(chsc_debug_log_id, imp , txt); \ + } while (0) + +static void CHSC_LOG_HEX(int level, void *data, int length) +{ + while (length > 0) { + debug_event(chsc_debug_log_id, level, data, length); + length -= chsc_debug_log_id->buf_size; + data += chsc_debug_log_id->buf_size; + } +} + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("driver for s390 chsc subchannels"); +MODULE_LICENSE("GPL"); + +static void chsc_subchannel_irq(struct subchannel *sch) +{ + struct chsc_private *private = sch->private; + struct chsc_request *request = private->request; + struct irb *irb = (struct irb *)__LC_IRB; + + CHSC_LOG(4, "irb"); + CHSC_LOG_HEX(4, irb, sizeof(*irb)); + /* Copy irb to provided request and set done. */ + if (!request) { + CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", + sch->schid.ssid, sch->schid.sch_no); + return; + } + private->request = NULL; + memcpy(&request->irb, irb, sizeof(*irb)); + stsch(sch->schid, &sch->schib); + complete(&request->completion); + put_device(&sch->dev); +} + +static int chsc_subchannel_probe(struct subchannel *sch) +{ + struct chsc_private *private; + int ret; + + CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n", + sch->schid.ssid, sch->schid.sch_no); + sch->isc = CHSC_SCH_ISC; + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + if (ret) { + CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", + sch->schid.ssid, sch->schid.sch_no, ret); + kfree(private); + } else { + sch->private = private; + if (sch->dev.uevent_suppress) { + sch->dev.uevent_suppress = 0; + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } + } + return ret; +} + +static int chsc_subchannel_remove(struct subchannel *sch) +{ + struct chsc_private *private; + + cio_disable_subchannel(sch); + private = sch->private; + sch->private = NULL; + if (private->request) { + complete(&private->request->completion); + put_device(&sch->dev); + } + kfree(private); + return 0; +} + +static void chsc_subchannel_shutdown(struct subchannel *sch) +{ + cio_disable_subchannel(sch); +} + +static struct css_device_id chsc_subchannel_ids[] = { + { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); + +static struct css_driver chsc_subchannel_driver = { + .owner = THIS_MODULE, + .subchannel_type = chsc_subchannel_ids, + .irq = chsc_subchannel_irq, + .probe = chsc_subchannel_probe, + .remove = chsc_subchannel_remove, + .shutdown = chsc_subchannel_shutdown, + .name = "chsc_subchannel", +}; + +static int __init chsc_init_dbfs(void) +{ + chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, + 16 * sizeof(long)); + if (!chsc_debug_msg_id) + goto out; + debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); + debug_set_level(chsc_debug_msg_id, 2); + chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); + if 
(!chsc_debug_log_id) + goto out; + debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); + debug_set_level(chsc_debug_log_id, 2); + return 0; +out: + if (chsc_debug_msg_id) + debug_unregister(chsc_debug_msg_id); + return -ENOMEM; +} + +static void chsc_remove_dbfs(void) +{ + debug_unregister(chsc_debug_log_id); + debug_unregister(chsc_debug_msg_id); +} + +static int __init chsc_init_sch_driver(void) +{ + return css_driver_register(&chsc_subchannel_driver); +} + +static void chsc_cleanup_sch_driver(void) +{ + css_driver_unregister(&chsc_subchannel_driver); +} + +static DEFINE_SPINLOCK(chsc_lock); + +static int chsc_subchannel_match_next_free(struct device *dev, void *data) +{ + struct subchannel *sch = to_subchannel(dev); + + return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); +} + +static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) +{ + struct device *dev; + + dev = driver_find_device(&chsc_subchannel_driver.drv, + sch ? &sch->dev : NULL, NULL, + chsc_subchannel_match_next_free); + return dev ? to_subchannel(dev) : NULL; +} + +/** + * chsc_async() - try to start a chsc request asynchronously + * @chsc_area: request to be started + * @request: request structure to associate + * + * Tries to start a chsc request on one of the existing chsc subchannels. + * Returns: + * %0 if the request was performed synchronously + * %-EINPROGRESS if the request was successfully started + * %-EBUSY if all chsc subchannels are busy + * %-ENODEV if no chsc subchannels are available + * Context: + * interrupts disabled, chsc_lock held + */ +static int chsc_async(struct chsc_async_area *chsc_area, + struct chsc_request *request) +{ + int cc; + struct chsc_private *private; + struct subchannel *sch = NULL; + int ret = -ENODEV; + char dbf[10]; + + chsc_area->header.key = PAGE_DEFAULT_KEY; + while ((sch = chsc_get_next_subchannel(sch))) { + spin_lock(sch->lock); + private = sch->private; + if (private->request) { + spin_unlock(sch->lock); + ret = -EBUSY; + continue; + } + chsc_area->header.sid = sch->schid; + CHSC_LOG(2, "schid"); + CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); + cc = chsc(chsc_area); + sprintf(dbf, "cc:%d", cc); + CHSC_LOG(2, dbf); + switch (cc) { + case 0: + ret = 0; + break; + case 1: + sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; + ret = -EINPROGRESS; + private->request = request; + break; + case 2: + ret = -EBUSY; + break; + default: + ret = -ENODEV; + } + spin_unlock(sch->lock); + CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", + sch->schid.ssid, sch->schid.sch_no, cc); + if (ret == -EINPROGRESS) + return -EINPROGRESS; + put_device(&sch->dev); + if (ret == 0) + return 0; + } + return ret; +} + +static void chsc_log_command(struct chsc_async_area *chsc_area) +{ + char dbf[10]; + + sprintf(dbf, "CHSC:%x", chsc_area->header.code); + CHSC_LOG(0, dbf); + CHSC_LOG_HEX(0, chsc_area, 32); +} + +static int chsc_examine_irb(struct chsc_request *request) +{ + int backed_up; + + if (!scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND) + return -EIO; + backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; + request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; + if (scsw_cstat(&request->irb.scsw) == 0) + return 0; + if (!backed_up) + return 0; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK) + return -EIO; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK) + return -EPERM; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK) + return -EAGAIN; + if (scsw_cstat(&request->irb.scsw) & 
SCHN_STAT_CHN_CTRL_CHK) + return -EAGAIN; + return -EIO; +} + +static int chsc_ioctl_start(void __user *user_area) +{ + struct chsc_request *request; + struct chsc_async_area *chsc_area; + int ret; + char dbf[10]; + + if (!css_general_characteristics.dynio) + /* It makes no sense to try. */ + return -EOPNOTSUPP; + chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + if (!chsc_area) + return -ENOMEM; + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) { + ret = -ENOMEM; + goto out_free; + } + init_completion(&request->completion); + if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free; + } + chsc_log_command(chsc_area); + spin_lock_irq(&chsc_lock); + ret = chsc_async(chsc_area, request); + spin_unlock_irq(&chsc_lock); + if (ret == -EINPROGRESS) { + wait_for_completion(&request->completion); + ret = chsc_examine_irb(request); + } + /* copy area back to user */ + if (!ret) + if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) + ret = -EFAULT; +out_free: + sprintf(dbf, "ret:%d", ret); + CHSC_LOG(0, dbf); + kfree(request); + free_page((unsigned long)chsc_area); + return ret; +} + +static int chsc_ioctl_info_channel_path(void __user *user_cd) +{ + struct chsc_chp_cd *cd; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 8; + u32 first_chpid : 8; + u32 : 24; + u32 last_chpid : 8; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *scpcd_area; + + scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scpcd_area) + return -ENOMEM; + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cd, user_cd, sizeof(*cd))) { + ret = -EFAULT; + goto out_free; + } + scpcd_area->request.length = 0x0010; + scpcd_area->request.code = 0x0028; + scpcd_area->m = cd->m; + scpcd_area->fmt1 = cd->fmt; + scpcd_area->cssid = cd->chpid.cssid; + scpcd_area->first_chpid = cd->chpid.id; + scpcd_area->last_chpid = cd->chpid.id; + + ccode = chsc(scpcd_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (scpcd_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "scpcd: response code=%x\n", + scpcd_area->response.code); + goto out_free; + } + memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length); + if (copy_to_user(user_cd, cd, sizeof(*cd))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cd); + free_page((unsigned long)scpcd_area); + return ret; +} + +static int chsc_ioctl_info_cu(void __user *user_cd) +{ + struct chsc_cu_cd *cd; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 8; + u32 first_cun : 8; + u32 : 24; + u32 last_cun : 8; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *scucd_area; + + scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scucd_area) + return -ENOMEM; + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cd, user_cd, sizeof(*cd))) { + ret = -EFAULT; + goto out_free; + } + scucd_area->request.length = 0x0010; + scucd_area->request.code = 0x0028; + scucd_area->m = cd->m; + scucd_area->fmt1 = cd->fmt; + scucd_area->cssid = cd->cssid; + scucd_area->first_cun = cd->cun; + scucd_area->last_cun = cd->cun; + + ccode = chsc(scucd_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if 
(scucd_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "scucd: response code=%x\n", + scucd_area->response.code); + goto out_free; + } + memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length); + if (copy_to_user(user_cd, cd, sizeof(*cd))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cd); + free_page((unsigned long)scucd_area); + return ret; +} + +static int chsc_ioctl_info_sch_cu(void __user *user_cud) +{ + struct chsc_sch_cud *cud; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 5; + u32 fmt1 : 4; + u32 : 2; + u32 ssid : 2; + u32 first_sch : 16; + u32 : 8; + u32 cssid : 8; + u32 last_sch : 16; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *sscud_area; + + sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sscud_area) + return -ENOMEM; + cud = kzalloc(sizeof(*cud), GFP_KERNEL); + if (!cud) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cud, user_cud, sizeof(*cud))) { + ret = -EFAULT; + goto out_free; + } + sscud_area->request.length = 0x0010; + sscud_area->request.code = 0x0006; + sscud_area->m = cud->schid.m; + sscud_area->fmt1 = cud->fmt; + sscud_area->ssid = cud->schid.ssid; + sscud_area->first_sch = cud->schid.sch_no; + sscud_area->cssid = cud->schid.cssid; + sscud_area->last_sch = cud->schid.sch_no; + + ccode = chsc(sscud_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sscud_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sscud: response code=%x\n", + sscud_area->response.code); + goto out_free; + } + memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length); + if (copy_to_user(user_cud, cud, sizeof(*cud))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cud); + free_page((unsigned long)sscud_area); + return ret; +} + +static int chsc_ioctl_conf_info(void __user *user_ci) +{ + struct chsc_conf_info *ci; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 6; + u32 ssid : 2; + u32 : 8; + u64 : 64; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *sci_area; + + sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sci_area) + return -ENOMEM; + ci = kzalloc(sizeof(*ci), GFP_KERNEL); + if (!ci) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(ci, user_ci, sizeof(*ci))) { + ret = -EFAULT; + goto out_free; + } + sci_area->request.length = 0x0010; + sci_area->request.code = 0x0012; + sci_area->m = ci->id.m; + sci_area->fmt1 = ci->fmt; + sci_area->cssid = ci->id.cssid; + sci_area->ssid = ci->id.ssid; + + ccode = chsc(sci_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sci_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sci: response code=%x\n", + sci_area->response.code); + goto out_free; + } + memcpy(&ci->scid, &sci_area->response, sci_area->response.length); + if (copy_to_user(user_ci, ci, sizeof(*ci))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(ci); + free_page((unsigned long)sci_area); + return ret; +} + +static int chsc_ioctl_conf_comp_list(void __user *user_ccl) +{ + struct chsc_comp_list *ccl; + int ret, ccode; + struct { + struct chsc_header request; + u32 ctype : 8; + u32 : 4; + u32 fmt : 4; + u32 : 16; + u64 : 64; + u32 list_parm[2]; + u64 : 64; + struct chsc_header response; + u8 data[PAGE_SIZE - 36]; + } __attribute__ ((packed)) *sccl_area; + struct { + u32 m : 1; + u32 : 31; + u32 
cssid : 8; + u32 : 16; + u32 chpid : 8; + } __attribute__ ((packed)) *chpid_parm; + struct { + u32 f_cssid : 8; + u32 l_cssid : 8; + u32 : 16; + u32 res; + } __attribute__ ((packed)) *cssids_parm; + + sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccl_area) + return -ENOMEM; + ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); + if (!ccl) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) { + ret = -EFAULT; + goto out_free; + } + sccl_area->request.length = 0x0020; + sccl_area->request.code = 0x0030; + sccl_area->fmt = ccl->req.fmt; + sccl_area->ctype = ccl->req.ctype; + switch (sccl_area->ctype) { + case CCL_CU_ON_CHP: + case CCL_IOP_CHP: + chpid_parm = (void *)&sccl_area->list_parm; + chpid_parm->m = ccl->req.chpid.m; + chpid_parm->cssid = ccl->req.chpid.chp.cssid; + chpid_parm->chpid = ccl->req.chpid.chp.id; + break; + case CCL_CSS_IMG: + case CCL_CSS_IMG_CONF_CHAR: + cssids_parm = (void *)&sccl_area->list_parm; + cssids_parm->f_cssid = ccl->req.cssids.f_cssid; + cssids_parm->l_cssid = ccl->req.cssids.l_cssid; + break; + } + ccode = chsc(sccl_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sccl_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sccl: response code=%x\n", + sccl_area->response.code); + goto out_free; + } + memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length); + if (copy_to_user(user_ccl, ccl, sizeof(*ccl))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(ccl); + free_page((unsigned long)sccl_area); + return ret; +} + +static int chsc_ioctl_chpd(void __user *user_chpd) +{ + struct chsc_cpd_info *chpd; + int ret; + + chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); + if (!chpd) + return -ENOMEM; + if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { + ret = -EFAULT; + goto out_free; + } + ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, + chpd->rfmt, chpd->c, chpd->m, + &chpd->chpdb); + if (ret) + goto out_free; + if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) + ret = -EFAULT; +out_free: + kfree(chpd); + return ret; +} + +static int chsc_ioctl_dcal(void __user *user_dcal) +{ + struct chsc_dcal *dcal; + int ret, ccode; + struct { + struct chsc_header request; + u32 atype : 8; + u32 : 4; + u32 fmt : 4; + u32 : 16; + u32 res0[2]; + u32 list_parm[2]; + u32 res1[2]; + struct chsc_header response; + u8 data[PAGE_SIZE - 36]; + } __attribute__ ((packed)) *sdcal_area; + + sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sdcal_area) + return -ENOMEM; + dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); + if (!dcal) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) { + ret = -EFAULT; + goto out_free; + } + sdcal_area->request.length = 0x0020; + sdcal_area->request.code = 0x0034; + sdcal_area->atype = dcal->req.atype; + sdcal_area->fmt = dcal->req.fmt; + memcpy(&sdcal_area->list_parm, &dcal->req.list_parm, + sizeof(sdcal_area->list_parm)); + + ccode = chsc(sdcal_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sdcal_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sdcal: response code=%x\n", + sdcal_area->response.code); + goto out_free; + } + memcpy(&dcal->sdcal, &sdcal_area->response, + sdcal_area->response.length); + if (copy_to_user(user_dcal, dcal, sizeof(*dcal))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(dcal); + free_page((unsigned long)sdcal_area); + return ret; +} + +static long chsc_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + CHSC_MSG(2, 
"chsc_ioctl called, cmd=%x\n", cmd); + switch (cmd) { + case CHSC_START: + return chsc_ioctl_start((void __user *)arg); + case CHSC_INFO_CHANNEL_PATH: + return chsc_ioctl_info_channel_path((void __user *)arg); + case CHSC_INFO_CU: + return chsc_ioctl_info_cu((void __user *)arg); + case CHSC_INFO_SCH_CU: + return chsc_ioctl_info_sch_cu((void __user *)arg); + case CHSC_INFO_CI: + return chsc_ioctl_conf_info((void __user *)arg); + case CHSC_INFO_CCL: + return chsc_ioctl_conf_comp_list((void __user *)arg); + case CHSC_INFO_CPD: + return chsc_ioctl_chpd((void __user *)arg); + case CHSC_INFO_DCAL: + return chsc_ioctl_dcal((void __user *)arg); + default: /* unknown ioctl number */ + return -ENOIOCTLCMD; + } +} + +static const struct file_operations chsc_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = chsc_ioctl, + .compat_ioctl = chsc_ioctl, +}; + +static struct miscdevice chsc_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "chsc", + .fops = &chsc_fops, +}; + +static int __init chsc_misc_init(void) +{ + return misc_register(&chsc_misc_device); +} + +static void chsc_misc_cleanup(void) +{ + misc_deregister(&chsc_misc_device); +} + +static int __init chsc_sch_init(void) +{ + int ret; + + ret = chsc_init_dbfs(); + if (ret) + return ret; + isc_register(CHSC_SCH_ISC); + ret = chsc_init_sch_driver(); + if (ret) + goto out_dbf; + ret = chsc_misc_init(); + if (ret) + goto out_driver; + return ret; +out_driver: + chsc_cleanup_sch_driver(); +out_dbf: + isc_unregister(CHSC_SCH_ISC); + chsc_remove_dbfs(); + return ret; +} + +static void __exit chsc_sch_exit(void) +{ + chsc_misc_cleanup(); + chsc_cleanup_sch_driver(); + isc_unregister(CHSC_SCH_ISC); + chsc_remove_dbfs(); +} + +module_init(chsc_sch_init); +module_exit(chsc_sch_exit); diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h new file mode 100644 index 00000000000..589ebfad6aa --- /dev/null +++ b/drivers/s390/cio/chsc_sch.h @@ -0,0 +1,13 @@ +#ifndef _CHSC_SCH_H +#define _CHSC_SCH_H + +struct chsc_request { + struct completion completion; + struct irb irb; +}; + +struct chsc_private { + struct chsc_request *request; +}; + +#endif diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b32d7eb3d81..33bff8fec7d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -2,7 +2,7 @@ * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 
1999,2008 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -24,7 +24,9 @@ #include <asm/ipl.h> #include <asm/chpid.h> #include <asm/airq.h> +#include <asm/isc.h> #include <asm/cpu.h> +#include <asm/fcx.h> #include "cio.h" #include "css.h" #include "chsc.h" @@ -72,7 +74,6 @@ out_unregister: debug_unregister(cio_debug_trace_id); if (cio_debug_crw_id) debug_unregister(cio_debug_crw_id); - printk(KERN_WARNING"cio: could not initialize debugging\n"); return -1; } @@ -128,7 +129,7 @@ cio_tpi(void) local_bh_disable(); irq_enter (); spin_lock(sch->lock); - memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); + memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); if (sch->driver && sch->driver->irq) sch->driver->irq(sch); spin_unlock(sch->lock); @@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ { char dbf_txt[15]; int ccode; - struct orb *orb; + union orb *orb; CIO_TRACE_EVENT(4, "stIO"); CIO_TRACE_EVENT(4, sch->dev.bus_id); orb = &to_io_private(sch)->orb; /* sch is always under 2G. */ - orb->intparm = (u32)(addr_t)sch; - orb->fmt = 1; + orb->cmd.intparm = (u32)(addr_t)sch; + orb->cmd.fmt = 1; - orb->pfch = sch->options.prefetch == 0; - orb->spnd = sch->options.suspend; - orb->ssic = sch->options.suspend && sch->options.inter; - orb->lpm = (lpm != 0) ? lpm : sch->lpm; + orb->cmd.pfch = sch->options.prefetch == 0; + orb->cmd.spnd = sch->options.suspend; + orb->cmd.ssic = sch->options.suspend && sch->options.inter; + orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; #ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ - orb->c64 = 1; - orb->i2k = 0; + orb->cmd.c64 = 1; + orb->cmd.i2k = 0; #endif - orb->key = key >> 4; + orb->cmd.key = key >> 4; /* issue "Start Subchannel" */ - orb->cpa = (__u32) __pa(cpa); + orb->cmd.cpa = (__u32) __pa(cpa); ccode = ssch(sch->schid, orb); /* process condition code */ @@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ /* * initialize device status information */ - sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; return 0; case 1: /* status pending */ case 2: /* busy */ @@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch) switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; return 0; case 1: return -EBUSY; @@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch) switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; return 0; case 1: /* status pending */ case 2: /* busy */ @@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch) switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; return 0; default: /* device not operational */ return -ENODEV; @@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch) return ret; } -/* - * Enable subchannel. +/** + * cio_enable_subchannel - enable a subchannel. + * @sch: subchannel to be enabled + * @intparm: interruption parameter to set */ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) { @@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) CIO_TRACE_EVENT (2, dbf_txt); return ret; } +EXPORT_SYMBOL_GPL(cio_enable_subchannel); -/* - * Disable subchannel. +/** + * cio_disable_subchannel - disable a subchannel. 
+ * @sch: subchannel to disable */ -int -cio_disable_subchannel (struct subchannel *sch) +int cio_disable_subchannel(struct subchannel *sch) { char dbf_txt[15]; int ccode; @@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch) if (ccode == 3) /* Not operational. */ return -ENODEV; - if (sch->schib.scsw.actl != 0) + if (scsw_actl(&sch->schib.scsw) != 0) /* * the disable function must not be called while there are * requests pending for completion ! @@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch) CIO_TRACE_EVENT (2, dbf_txt); return ret; } +EXPORT_SYMBOL_GPL(cio_disable_subchannel); int cio_create_sch_lock(struct subchannel *sch) { @@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch) return 0; } -/* - * cio_validate_subchannel() +static int cio_check_devno_blacklisted(struct subchannel *sch) +{ + if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { + /* + * This device must not be known to Linux. So we simply + * say that there is no device and return ENODEV. + */ + CIO_MSG_EVENT(6, "Blacklisted device detected " + "at devno %04X, subchannel set %x\n", + sch->schib.pmcw.dev, sch->schid.ssid); + return -ENODEV; + } + return 0; +} + +static int cio_validate_io_subchannel(struct subchannel *sch) +{ + /* Initialization for io subchannels. */ + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + + /* Devno is valid. */ + return cio_check_devno_blacklisted(sch); +} + +static int cio_validate_msg_subchannel(struct subchannel *sch) +{ + /* Initialization for message subchannels. */ + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + + /* Devno is valid. */ + return cio_check_devno_blacklisted(sch); +} + +/** + * cio_validate_subchannel - basic validation of subchannel + * @sch: subchannel structure to be filled out + * @schid: subchannel id * * Find out subchannel type and initialize struct subchannel. * Return codes: - * SUBCHANNEL_TYPE_IO for a normal io subchannel - * SUBCHANNEL_TYPE_CHSC for a chsc subchannel - * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel - * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel + * 0 on success * -ENXIO for non-defined subchannels - * -ENODEV for subchannels with invalid device number or blacklisted devices + * -ENODEV for invalid subchannels or blacklisted devices + * -EIO for subchannels in an invalid subchannel set */ -int -cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) +int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; int err; - sprintf (dbf_txt, "valsch%x", schid.sch_no); - CIO_TRACE_EVENT (4, dbf_txt); + sprintf(dbf_txt, "valsch%x", schid.sch_no); + CIO_TRACE_EVENT(4, dbf_txt); /* Nuke all fields. */ memset(sch, 0, sizeof(struct subchannel)); @@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; - /* - * ... just being curious we check for non I/O subchannels - */ - if (sch->st != 0) { - CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " - "non-I/O subchannel type %04X\n", - sch->schid.ssid, sch->schid.sch_no, sch->st); - /* We stop here for non-io subchannels. */ - err = sch->st; - goto out; - } - - /* Initialization for io subchannels. 
*/ - if (!css_sch_is_valid(&sch->schib)) { - err = -ENODEV; - goto out; + switch (sch->st) { + case SUBCHANNEL_TYPE_IO: + err = cio_validate_io_subchannel(sch); + break; + case SUBCHANNEL_TYPE_MSG: + err = cio_validate_msg_subchannel(sch); + break; + default: + err = 0; } - - /* Devno is valid. */ - if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { - /* - * This device must not be known to Linux. So we simply - * say that there is no device and return ENODEV. - */ - CIO_MSG_EVENT(6, "Blacklisted device detected " - "at devno %04X, subchannel set %x\n", - sch->schib.pmcw.dev, sch->schid.ssid); - err = -ENODEV; + if (err) goto out; - } - if (cio_is_console(sch->schid)) { - sch->opm = 0xff; - sch->isc = 1; - } else { - sch->opm = chp_get_sch_opm(sch); - sch->isc = 3; - } - sch->lpm = sch->schib.pmcw.pam & sch->opm; - - CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " - "- PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->schid.ssid, - sch->schid.sch_no, sch->schib.pmcw.pim, - sch->schib.pmcw.pam, sch->schib.pmcw.pom); - /* - * We now have to initially ... - * ... enable "concurrent sense" - * ... enable "multipath mode" if more than one - * CHPID is available. This is done regardless - * whether multiple paths are available for us. - */ - sch->schib.pmcw.csense = 1; /* concurrent sense */ - sch->schib.pmcw.ena = 0; - if ((sch->lpm & (sch->lpm - 1)) != 0) - sch->schib.pmcw.mp = 1; /* multipath mode */ - /* clean up possible residual cmf stuff */ - sch->schib.pmcw.mme = 0; - sch->schib.pmcw.mbfc = 0; - sch->schib.pmcw.mbi = 0; - sch->schib.mba = 0; + CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", + sch->schid.ssid, sch->schid.sch_no, sch->st); return 0; out: if (!cio_is_console(schid)) @@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs) */ if (tpi_info->adapter_IO == 1 && tpi_info->int_type == IO_INTERRUPT_TYPE) { - do_adapter_IO(); + do_adapter_IO(tpi_info->isc); continue; } sch = (struct subchannel *)(unsigned long)tpi_info->intparm; @@ -706,9 +699,9 @@ void wait_cons_dev(void) if (!console_subchannel_in_use) return; - /* disable all but isc 1 (console device) */ + /* disable all but the console isc */ __ctl_store (save_cr6, 6, 6); - cr6 = 0x40000000; + cr6 = 1UL << (31 - CONSOLE_ISC); __ctl_load (cr6, 6, 6); do { @@ -716,7 +709,7 @@ void wait_cons_dev(void) if (!cio_tpi()) cpu_relax(); spin_lock(console_subchannel.lock); - } while (console_subchannel.schib.scsw.actl != 0); + } while (console_subchannel.schib.scsw.cmd.actl != 0); /* * restore previous isc value */ @@ -761,7 +754,6 @@ cio_get_console_sch_no(void) /* unlike in 2.4, we cannot autoprobe here, since * the channel subsystem is not fully initialized. 
* With some luck, the HWC console can take over */ - printk(KERN_WARNING "cio: No ccw console found!\n"); return -1; } return console_irq; @@ -778,6 +770,7 @@ cio_probe_console(void) sch_no = cio_get_console_sch_no(); if (sch_no == -1) { console_subchannel_in_use = 0; + printk(KERN_WARNING "cio: No ccw console found!\n"); return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); @@ -790,15 +783,15 @@ cio_probe_console(void) } /* - * enable console I/O-interrupt subclass 1 + * enable console I/O-interrupt subclass */ - ctl_set_bit(6, 30); - console_subchannel.isc = 1; - console_subchannel.schib.pmcw.isc = 1; + isc_register(CONSOLE_ISC); + console_subchannel.schib.pmcw.isc = CONSOLE_ISC; console_subchannel.schib.pmcw.intparm = (u32)(addr_t)&console_subchannel; ret = cio_modify(&console_subchannel); if (ret) { + isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; return ERR_PTR(ret); } @@ -810,7 +803,7 @@ cio_release_console(void) { console_subchannel.schib.pmcw.intparm = 0; cio_modify(&console_subchannel); - ctl_clear_bit(6, 24); + isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; } @@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs) } static int -__clear_subchannel_easy(struct subchannel_id schid) +__clear_io_subchannel_easy(struct subchannel_id schid) { int retry; @@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid) return -EBUSY; } +static void __clear_chsc_subchannel_easy(void) +{ + /* It seems we can only wait for a bit here :/ */ + udelay_reset(100); +} + static int pgm_check_occured; static void cio_reset_pgm_check_handler(void) @@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) case -ENODEV: break; default: /* -EBUSY */ - if (__clear_subchannel_easy(schid)) - break; /* give up... */ + switch (schib.pmcw.st) { + case SUBCHANNEL_TYPE_IO: + if (__clear_io_subchannel_easy(schid)) + goto out; /* give up... */ + break; + case SUBCHANNEL_TYPE_CHSC: + __clear_chsc_subchannel_easy(); + break; + default: + /* No default clear strategy */ + break; + } stsch(schid, &schib); __disable_subchannel_easy(schid, &schib); } +out: return 0; } @@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) iplinfo->is_qdio = schib.pmcw.qf; return 0; } + +/** + * cio_tm_start_key - perform start function + * @sch: subchannel on which to perform the start function + * @tcw: transport-command word to be started + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given subchannel. Return zero on success, non-zero + * otherwise. + */ +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) +{ + int cc; + union orb *orb = &to_io_private(sch)->orb; + + memset(orb, 0, sizeof(union orb)); + orb->tm.intparm = (u32) (addr_t) sch; + orb->tm.key = key >> 4; + orb->tm.b = 1; + orb->tm.lpm = lpm ? lpm : sch->lpm; + orb->tm.tcw = (u32) (addr_t) tcw; + cc = ssch(sch->schid, orb); + switch (cc) { + case 0: + return 0; + case 1: + case 2: + return -EBUSY; + default: + return cio_start_handle_notoper(sch, lpm); + } +} + +/** + * cio_tm_intrg - perform interrogate function + * @sch - subchannel on which to perform the interrogate function + * + * If the specified subchannel is running in transport-mode, perform the + * interrogate function. Return zero on success, non-zero otherwie. 
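Purely as an illustration of the two transport-mode helpers being added here (cio_tm_start_key() above and cio_tm_intrg(), whose body follows), the fragment below sketches a possible call site. It is not part of the patch; it assumes a TCW already built with the asm/fcx.h helpers and a caller inside the cio layer where struct subchannel and PAGE_DEFAULT_KEY are visible.

    /* Illustrative sketch only -- not patch content. */
    static int example_tm_start(struct subchannel *sch, struct tcw *tcw)
    {
            int ret;

            spin_lock_irq(sch->lock);
            ret = cio_tm_start_key(sch, tcw, sch->lpm, PAGE_DEFAULT_KEY);
            spin_unlock_irq(sch->lock);
            /*
             * If the started operation never completes, cio_tm_intrg(sch)
             * can be used afterwards to interrogate the subchannel.
             */
            return ret;
    }
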
+ */ +int cio_tm_intrg(struct subchannel *sch) +{ + int cc; + + if (!to_io_private(sch)->orb.tm.b) + return -EINVAL; + cc = xsch(sch->schid); + switch (cc) { + case 0: + case 2: + return 0; + case 1: + return -EBUSY; + default: + return -ENODEV; + } +} diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 6e933aebe01..3b236d20e83 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -3,9 +3,12 @@ #include <linux/mutex.h> #include <linux/device.h> +#include <linux/mod_devicetable.h> #include <asm/chpid.h> +#include <asm/cio.h> +#include <asm/fcx.h> +#include <asm/schid.h> #include "chsc.h" -#include "schid.h" /* * path management control word @@ -13,7 +16,7 @@ struct pmcw { u32 intparm; /* interruption parameter */ u32 qf : 1; /* qdio facility */ - u32 res0 : 1; /* reserved zeros */ + u32 w : 1; u32 isc : 3; /* interruption sublass */ u32 res5 : 3; /* reserved zeros */ u32 ena : 1; /* enabled */ @@ -47,7 +50,7 @@ struct pmcw { */ struct schib { struct pmcw pmcw; /* path management control word */ - struct scsw scsw; /* subchannel status word */ + union scsw scsw; /* subchannel status word */ __u64 mba; /* measurement block address */ __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); @@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int); extern int cio_get_options (struct subchannel *); extern int cio_modify (struct subchannel *); +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); +int cio_tm_intrg(struct subchannel *sch); + int cio_create_sch_lock(struct subchannel *); -void do_adapter_IO(void); +void do_adapter_IO(u8 isc); void do_IRQ(struct pt_regs *); /* Use with care. */ diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2808b6833b9..a90b28c0be5 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev) if (stsch(sch->schid, &sch->schib)) return -ENODEV; - if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { + if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { /* Don't copy if a start function is in progress. */ - if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && - (sch->schib.scsw.actl & + if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && + (scsw_actl(&sch->schib.scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && - (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) + (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) return -EBUSY; } cmb_data = cdev->private->cmb; @@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev) free_pages((unsigned long)mem, get_order(size)); } else if (!mem) { /* no luck */ - printk(KERN_WARNING "cio: failed to allocate area " - "for measuring %d subchannels\n", - cmb_area.num_channels); ret = -ENOMEM; goto out; } else { @@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev, switch (val) { case 0: ret = disable_cmf(cdev); - if (ret) - dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); break; case 1: ret = enable_cmf(cdev); - if (ret && ret != -EBUSY) - dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); break; } @@ -1344,8 +1337,7 @@ static int __init init_cmf(void) * to basic mode. 
*/ if (format == CMF_AUTODETECT) { - if (!css_characteristics_avail || - !css_general_characteristics.ext_mb) { + if (!css_general_characteristics.ext_mb) { format = CMF_BASIC; } else { format = CMF_EXTENDED; @@ -1365,8 +1357,6 @@ static int __init init_cmf(void) cmbops = &cmbops_extended; break; default: - printk(KERN_ERR "cio: Invalid format %d for channel " - "measurement facility\n", format); return 1; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a76956512b2..46c021d880d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -2,8 +2,7 @@ * drivers/s390/cio/css.c * driver for channel subsystem * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002,2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ @@ -14,7 +13,9 @@ #include <linux/errno.h> #include <linux/list.h> #include <linux/reboot.h> +#include <asm/isc.h> +#include "../s390mach.h" #include "css.h" #include "cio.h" #include "cio_debug.h" @@ -30,8 +31,6 @@ static int max_ssid = 0; struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; -int css_characteristics_avail = 0; - int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) { @@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid) kfree(sch); return ERR_PTR(ret); } - - if (sch->st != SUBCHANNEL_TYPE_IO) { - /* For now we ignore all non-io subchannels. */ - kfree(sch); - return ERR_PTR(-EINVAL); - } - - /* - * Set intparm to subchannel address. - * This is fine even on 64bit since the subchannel is always located - * under 2G. - */ - sch->schib.pmcw.intparm = (u32)(addr_t)sch; - ret = cio_modify(sch); - if (ret) { - kfree(sch->lock); - kfree(sch); - return ERR_PTR(ret); - } return sch; } @@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch) return ret; } +/** + * css_sch_device_unregister - unregister a subchannel + * @sch: subchannel to be unregistered + */ void css_sch_device_unregister(struct subchannel *sch) { mutex_lock(&sch->reg_mutex); - device_unregister(&sch->dev); + if (device_is_registered(&sch->dev)) + device_unregister(&sch->dev); mutex_unlock(&sch->reg_mutex); } +EXPORT_SYMBOL_GPL(css_sch_device_unregister); static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { @@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch) } } +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "%01x\n", sch->st); +} + +static DEVICE_ATTR(type, 0444, type_show, NULL); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "css:t%01X\n", sch->st); +} + +static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); + +static struct attribute *subch_attrs[] = { + &dev_attr_type.attr, + &dev_attr_modalias.attr, + NULL, +}; + +static struct attribute_group subch_attr_group = { + .attrs = subch_attrs, +}; + +static struct attribute_group *default_subch_attr_groups[] = { + &subch_attr_group, + NULL, +}; + static int css_register_subchannel(struct subchannel *sch) { int ret; @@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch) sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; - sch->dev.groups = subch_attr_groups; + sch->dev.groups = 
default_subch_attr_groups; /* * We don't want to generate uevents for I/O subchannels that don't * have a working ccw device behind them since they will be * unregistered before they can be used anyway, so we delay the add * uevent until after device recognition was successful. + * Note that we suppress the uevent for all subchannel types; + * the subchannel driver can decide itself when it wants to inform + * userspace of its existence. */ - if (!cio_is_console(sch->schid)) - /* Console is special, no need to suppress. */ - sch->dev.uevent_suppress = 1; + sch->dev.uevent_suppress = 1; css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); @@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch) sch->schid.ssid, sch->schid.sch_no, ret); return ret; } + if (!sch->driver) { + /* + * No driver matched. Generate the uevent now so that + * a fitting driver module may be loaded based on the + * modalias. + */ + sch->dev.uevent_suppress = 0; + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } return ret; } -static int css_probe_device(struct subchannel_id schid) +int css_probe_device(struct subchannel_id schid) { int ret; struct subchannel *sch; @@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib) { if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) return 0; + if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) + return 0; return 1; } EXPORT_SYMBOL_GPL(css_sch_is_valid); -static int css_get_subchannel_status(struct subchannel *sch) -{ - struct schib schib; - - if (stsch(sch->schid, &schib)) - return CIO_GONE; - if (!css_sch_is_valid(&schib)) - return CIO_GONE; - if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) - return CIO_REVALIDATE; - if (!sch->lpm) - return CIO_NO_PATH; - return CIO_OPER; -} - -static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) -{ - int event, ret, disc; - unsigned long flags; - enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; - - spin_lock_irqsave(sch->lock, flags); - disc = device_is_disconnected(sch); - if (disc && slow) { - /* Disconnected devices are evaluated directly only.*/ - spin_unlock_irqrestore(sch->lock, flags); - return 0; - } - /* No interrupt after machine check - kill pending timers. */ - device_kill_pending_timer(sch); - if (!disc && !slow) { - /* Non-disconnected devices are evaluated on the slow path. */ - spin_unlock_irqrestore(sch->lock, flags); - return -EAGAIN; - } - event = css_get_subchannel_status(sch); - CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", - sch->schid.ssid, sch->schid.sch_no, event, - disc ? "disconnected" : "normal", - slow ? "slow" : "fast"); - /* Analyze subchannel status. */ - action = NONE; - switch (event) { - case CIO_NO_PATH: - if (disc) { - /* Check if paths have become available. */ - action = REPROBE; - break; - } - /* fall through */ - case CIO_GONE: - /* Prevent unwanted effects when opening lock. */ - cio_disable_subchannel(sch); - device_set_disconnected(sch); - /* Ask driver what to do with device. */ - action = UNREGISTER; - if (sch->driver && sch->driver->notify) { - spin_unlock_irqrestore(sch->lock, flags); - ret = sch->driver->notify(sch, event); - spin_lock_irqsave(sch->lock, flags); - if (ret) - action = NONE; - } - break; - case CIO_REVALIDATE: - /* Device will be removed, so no notify necessary. */ - if (disc) - /* Reprobe because immediate unregister might block. 
*/ - action = REPROBE; - else - action = UNREGISTER_PROBE; - break; - case CIO_OPER: - if (disc) - /* Get device operational again. */ - action = REPROBE; - break; - } - /* Perform action. */ - ret = 0; - switch (action) { - case UNREGISTER: - case UNREGISTER_PROBE: - /* Unregister device (will use subchannel lock). */ - spin_unlock_irqrestore(sch->lock, flags); - css_sch_device_unregister(sch); - spin_lock_irqsave(sch->lock, flags); - - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - break; - case REPROBE: - device_trigger_reprobe(sch); - break; - default: - break; - } - spin_unlock_irqrestore(sch->lock, flags); - /* Probe if necessary. */ - if (action == UNREGISTER_PROBE) - ret = css_probe_device(sch->schid); - - return ret; -} - static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) { struct schib schib; @@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) return css_probe_device(schid); } +static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) +{ + int ret = 0; + + if (sch->driver) { + if (sch->driver->sch_event) + ret = sch->driver->sch_event(sch, slow); + else + dev_dbg(&sch->dev, + "Got subchannel machine check but " + "no sch_event handler provided.\n"); + } + return ret; +} + static void css_evaluate_subchannel(struct subchannel_id schid, int slow) { struct subchannel *sch; @@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe); /* * Called from the machine check handler for subchannel report words. */ -void css_process_crw(int rsid1, int rsid2) +static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct subchannel_id mchk_schid; - CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", - rsid1, rsid2); + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); + if (crw1) + CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, + crw1->anc, crw1->erc, crw1->rsid); init_subchannel_id(&mchk_schid); - mchk_schid.sch_no = rsid1; - if (rsid2 != 0) - mchk_schid.ssid = (rsid2 >> 8) & 3; + mchk_schid.sch_no = crw0->rsid; + if (crw1) + mchk_schid.ssid = (crw1->rsid >> 8) & 3; - /* + /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. @@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) static void __init css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { - if (css_characteristics_avail && css_general_characteristics.mcss) { + if (css_general_characteristics.mcss) { css->global_pgid.pgid_high.ext_cssid.version = 0x80; css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; } else { @@ -795,8 +748,6 @@ init_channel_subsystem (void) ret = chsc_determine_css_characteristics(); if (ret == -ENOMEM) goto out; /* No need to continue. 
*/ - if (ret == 0) - css_characteristics_avail = 1; ret = chsc_alloc_sei_area(); if (ret) @@ -806,6 +757,10 @@ init_channel_subsystem (void) if (ret) goto out; + ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); + if (ret) + goto out; + if ((ret = bus_register(&css_bus_type))) goto out; @@ -836,8 +791,7 @@ init_channel_subsystem (void) ret = device_register(&css->device); if (ret) goto out_free_all; - if (css_characteristics_avail && - css_chsc_characteristics.secm) { + if (css_chsc_characteristics.secm) { ret = device_create_file(&css->device, &dev_attr_cm_enable); if (ret) @@ -852,7 +806,8 @@ init_channel_subsystem (void) goto out_pseudo; css_init_done = 1; - ctl_set_bit(6, 28); + /* Enable default isc for I/O subchannels. */ + isc_register(IO_SCH_ISC); for_each_subchannel(__init_channel_subsystem, NULL); return 0; @@ -875,7 +830,7 @@ out_unregister: i--; css = channel_subsystems[i]; device_unregister(&css->pseudo_subchannel->dev); - if (css_characteristics_avail && css_chsc_characteristics.secm) + if (css_chsc_characteristics.secm) device_remove_file(&css->device, &dev_attr_cm_enable); device_unregister(&css->device); @@ -883,6 +838,7 @@ out_unregister: out_bus: bus_unregister(&css_bus_type); out: + s390_unregister_crw_handler(CRW_RSC_CSS); chsc_free_sei_area(); kfree(slow_subchannel_set); printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", @@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch) return sch == to_css(sch->dev.parent)->pseudo_subchannel; } -/* - * find a driver for a subchannel. They identify by the subchannel - * type with the exception that the console subchannel driver has its own - * subchannel type although the device is an i/o subchannel - */ -static int -css_bus_match (struct device *dev, struct device_driver *drv) +static int css_bus_match(struct device *dev, struct device_driver *drv) { struct subchannel *sch = to_subchannel(dev); struct css_driver *driver = to_cssdriver(drv); + struct css_device_id *id; - if (sch->st == driver->subchannel_type) - return 1; + for (id = driver->subchannel_type; id->match_flags; id++) { + if (sch->st == id->type) + return 1; + } return 0; } @@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev) sch->driver->shutdown(sch); } +static int css_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct subchannel *sch = to_subchannel(dev); + int ret; + + ret = add_uevent_var(env, "ST=%01X", sch->st); + if (ret) + return ret; + ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); + return ret; +} + struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, .remove = css_remove, .shutdown = css_shutdown, + .uevent = css_uevent, }; /** @@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(css_bus_type); -EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e1913518f35..57ebf120f82 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -9,8 +9,7 @@ #include <asm/cio.h> #include <asm/chpid.h> - -#include "schid.h" +#include <asm/schid.h> /* * path grouping stuff @@ -58,20 +57,28 @@ struct pgid { __u32 tod_high; /* high word TOD clock */ } __attribute__ ((packed)); -/* - * A css driver handles all subchannels of one type. - * Currently, we only care about I/O subchannels (type 0), these - * have a ccw_device connected to them. 
- */ struct subchannel; +struct chp_link; +/** + * struct css_driver - device driver for subchannels + * @owner: owning module + * @subchannel_type: subchannel type supported by this driver + * @drv: embedded device driver structure + * @irq: called on interrupts + * @chp_event: called for events affecting a channel path + * @sch_event: called for events affecting the subchannel + * @probe: function called on probe + * @remove: function called on remove + * @shutdown: called at device shutdown + * @name: name of the device driver + */ struct css_driver { struct module *owner; - unsigned int subchannel_type; + struct css_device_id *subchannel_type; struct device_driver drv; void (*irq)(struct subchannel *); - int (*notify)(struct subchannel *, int); - void (*verify)(struct subchannel *); - void (*termination)(struct subchannel *); + int (*chp_event)(struct subchannel *, struct chp_link *, int); + int (*sch_event)(struct subchannel *, int); int (*probe)(struct subchannel *); int (*remove)(struct subchannel *); void (*shutdown)(struct subchannel *); @@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *); extern void css_driver_unregister(struct css_driver *); extern void css_sch_device_unregister(struct subchannel *); -extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); +extern int css_probe_device(struct subchannel_id); +extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data); extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); -extern void css_process_crw(int, int); extern void css_reiterate_subchannels(void); void css_update_ssd_info(struct subchannel *sch); @@ -121,20 +128,6 @@ struct channel_subsystem { extern struct bus_type css_bus_type; extern struct channel_subsystem *channel_subsystems[]; -/* Some helper functions for disconnected state. */ -int device_is_disconnected(struct subchannel *); -void device_set_disconnected(struct subchannel *); -void device_trigger_reprobe(struct subchannel *); - -/* Helper functions for vary on/off. */ -int device_is_online(struct subchannel *); -void device_kill_io(struct subchannel *); -void device_set_intretry(struct subchannel *sch); -int device_trigger_verify(struct subchannel *sch); - -/* Machine check helper function. */ -void device_kill_pending_timer(struct subchannel *); - /* Helper functions to build lists for the slow path. */ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); @@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *); extern struct workqueue_struct *slow_path_wq; void css_wait_for_slow_path(void); - -extern struct attribute_group *subch_attr_groups[]; #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e22813db74a..e818d0c54c0 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -2,8 +2,7 @@ * drivers/s390/cio/device.c * bus driver for ccw devices * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
2002,2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) @@ -23,7 +22,9 @@ #include <asm/cio.h> #include <asm/param.h> /* HZ */ #include <asm/cmb.h> +#include <asm/isc.h> +#include "chp.h" #include "cio.h" #include "cio_debug.h" #include "css.h" @@ -125,19 +126,24 @@ struct bus_type ccw_bus_type; static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); static int io_subchannel_remove(struct subchannel *); -static int io_subchannel_notify(struct subchannel *, int); -static void io_subchannel_verify(struct subchannel *); -static void io_subchannel_ioterm(struct subchannel *); static void io_subchannel_shutdown(struct subchannel *); +static int io_subchannel_sch_event(struct subchannel *, int); +static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, + int); + +static struct css_device_id io_subchannel_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, io_subchannel_ids); static struct css_driver io_subchannel_driver = { .owner = THIS_MODULE, - .subchannel_type = SUBCHANNEL_TYPE_IO, + .subchannel_type = io_subchannel_ids, .name = "io_subchannel", .irq = io_subchannel_irq, - .notify = io_subchannel_notify, - .verify = io_subchannel_verify, - .termination = io_subchannel_ioterm, + .sch_event = io_subchannel_sch_event, + .chp_event = io_subchannel_chp_event, .probe = io_subchannel_probe, .remove = io_subchannel_remove, .shutdown = io_subchannel_shutdown, @@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev) ccw_device_set_online(cdev); return 0; } -static void online_store_handle_online(struct ccw_device *cdev, int force) +static int online_store_handle_online(struct ccw_device *cdev, int force) { int ret; ret = online_store_recog_and_online(cdev); if (ret) - return; + return ret; if (force && cdev->private->state == DEV_STATE_BOXED) { ret = ccw_device_stlck(cdev); - if (ret) { - dev_warn(&cdev->dev, - "ccw_device_stlck returned %d!\n", ret); - return; - } + if (ret) + return ret; if (cdev->id.cu_type == 0) cdev->private->state = DEV_STATE_NOT_OPER; online_store_recog_and_online(cdev); } - + return 0; } static ssize_t online_store (struct device *dev, struct device_attribute *attr, @@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, ret = count; break; case 1: - online_store_handle_online(cdev, force); - ret = count; + ret = online_store_handle_online(cdev, force); + if (!ret) + ret = count; break; default: ret = -EINVAL; @@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(availability, 0444, available_show, NULL); -static struct attribute * subch_attrs[] = { +static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, NULL, }; -static struct attribute_group subch_attr_group = { - .attrs = subch_attrs, -}; - -struct attribute_group *subch_attr_groups[] = { - &subch_attr_group, - NULL, +static struct attribute_group io_subchannel_attr_group = { + .attrs = io_subchannel_attrs, }; static struct attribute * ccwdev_attrs[] = { @@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch, sch_set_cdev(sch, cdev); cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; - device_trigger_reprobe(sch); + ccw_device_trigger_reprobe(cdev); 
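With subchannel drivers now matched through css_device_id tables and announced to userspace via css:t&lt;type&gt; modalias uevents (see the css_bus_match()/css_uevent() changes above and the io_subchannel_ids table in this file), a minimal driver registration follows the same pattern as the chsc and io subchannel drivers. The sketch below is illustrative only: the names are invented, the subchannel type is a placeholder, and it assumes the file is built inside drivers/s390/cio so that the private cio.h and css.h headers are available.

    /* Illustrative sketch only -- not patch content. */
    #include <linux/module.h>

    #include "cio.h"
    #include "css.h"

    static struct css_device_id example_subchannel_ids[] = {
            /* Placeholder type; a real driver matches its own subchannel type. */
            { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_MSG, },
            { /* end of list */ },
    };
    MODULE_DEVICE_TABLE(css, example_subchannel_ids);

    static void example_irq(struct subchannel *sch)
    {
            /* Interrupt handling for the subchannel would go here. */
    }

    static int example_probe(struct subchannel *sch)
    {
            /*
             * Enable the subchannel with its own address as interruption
             * parameter. A real driver would also set up sch->isc first,
             * as chsc_subchannel_probe() above does.
             */
            return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
    }

    static int example_remove(struct subchannel *sch)
    {
            cio_disable_subchannel(sch);
            return 0;
    }

    static struct css_driver example_subchannel_driver = {
            .owner = THIS_MODULE,
            .subchannel_type = example_subchannel_ids,
            .name = "example_subchannel",
            .irq = example_irq,
            .probe = example_probe,
            .remove = example_remove,
    };

    static int __init example_init(void)
    {
            return css_driver_register(&example_subchannel_driver);
    }

    static void __exit example_exit(void)
    {
            css_driver_unregister(&example_subchannel_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Matching then happens purely on the id table, and css_uevent() emits MODALIAS=css:tN so that such a module can be autoloaded from userspace.
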
spin_unlock_irq(sch->lock); } @@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) struct ccw_device_private *priv; sch_set_cdev(sch, cdev); - sch->driver = &io_subchannel_driver; cdev->ccwlock = sch->lock; /* Init private data. */ @@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch) dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); } -static int -io_subchannel_probe (struct subchannel *sch) +static void io_subchannel_init_fields(struct subchannel *sch) +{ + if (cio_is_console(sch->schid)) + sch->opm = 0xff; + else + sch->opm = chp_get_sch_opm(sch); + sch->lpm = sch->schib.pmcw.pam & sch->opm; + sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; + + CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" + " - PIM = %02X, PAM = %02X, POM = %02X\n", + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, + sch->schib.pmcw.pam, sch->schib.pmcw.pom); + /* Initially set up some fields in the pmcw. */ + sch->schib.pmcw.ena = 0; + sch->schib.pmcw.csense = 1; /* concurrent sense */ + if ((sch->lpm & (sch->lpm - 1)) != 0) + sch->schib.pmcw.mp = 1; /* multipath mode */ + /* clean up possible residual cmf stuff */ + sch->schib.pmcw.mme = 0; + sch->schib.pmcw.mbfc = 0; + sch->schib.pmcw.mbi = 0; + sch->schib.mba = 0; +} + +static int io_subchannel_probe(struct subchannel *sch) { struct ccw_device *cdev; int rc; @@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch) cdev = sch_get_cdev(sch); if (cdev) { + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + CIO_MSG_EVENT(0, "Failed to create io subchannel " + "attributes for subchannel " + "0.%x.%04x (rc=%d)\n", + sch->schid.ssid, sch->schid.sch_no, rc); /* * This subchannel already has an associated ccw_device. - * Register it and exit. This happens for all early - * device, e.g. the console. + * Throw the delayed uevent for the subchannel, register + * the ccw_device and exit. This happens for all early + * devices, e.g. the console. */ + sch->dev.uevent_suppress = 0; + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev->dev.groups = ccwdev_attr_groups; device_initialize(&cdev->dev); ccw_device_register(cdev); @@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch) get_device(&cdev->dev); return 0; } + io_subchannel_init_fields(sch); /* * First check if a fitting device may be found amongst the * disconnected devices or in the orphanage. */ dev_id.devno = sch->schib.pmcw.dev; dev_id.ssid = sch->schid.ssid; + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + return rc; /* Allocate I/O subchannel private data. 
*/ sch->private = kzalloc(sizeof(struct io_subchannel_private), GFP_KERNEL | GFP_DMA); - if (!sch->private) - return -ENOMEM; + if (!sch->private) { + rc = -ENOMEM; + goto out_err; + } cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); if (!cdev) cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), @@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch) } cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { - kfree(sch->private); - return PTR_ERR(cdev); + rc = PTR_ERR(cdev); + goto out_err; } rc = io_subchannel_recog(cdev, sch); if (rc) { @@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch) spin_unlock_irqrestore(sch->lock, flags); if (cdev->dev.release) cdev->dev.release(&cdev->dev); - kfree(sch->private); + goto out_err; } - + return 0; +out_err: + kfree(sch->private); + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return rc; } @@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch) ccw_device_unregister(cdev); put_device(&cdev->dev); kfree(sch->private); + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return 0; } @@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event) cdev = sch_get_cdev(sch); if (!cdev) return 0; - if (!cdev->drv) - return 0; - if (!cdev->online) - return 0; - return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; + return ccw_device_notify(cdev, event); } static void io_subchannel_verify(struct subchannel *sch) @@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch) dev_fsm_event(cdev, DEV_EVENT_VERIFY); } -static void io_subchannel_ioterm(struct subchannel *sch) +static int check_for_io_on_path(struct subchannel *sch, int mask) { - struct ccw_device *cdev; + int cc; - cdev = sch_get_cdev(sch); - if (!cdev) - return; - /* Internal I/O will be retried by the interrupt handler. */ - if (cdev->private->flags.intretry) + cc = stsch(sch->schid, &sch->schib); + if (cc) + return 0; + if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) + return 1; + return 0; +} + +static void terminate_internal_io(struct subchannel *sch, + struct ccw_device *cdev) +{ + if (cio_clear(sch)) { + /* Recheck device in case clear failed. */ + sch->lpm = 0; + if (cdev->online) + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + else + css_schedule_eval(sch->schid); return; + } cdev->private->state = DEV_STATE_CLEAR_VERIFY; + /* Request retry of internal operation. */ + cdev->private->flags.intretry = 1; + /* Call handler. */ if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); } +static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) +{ + struct ccw_device *cdev; + + cdev = sch_get_cdev(sch); + if (!cdev) + return; + if (check_for_io_on_path(sch, mask)) { + if (cdev->private->state == DEV_STATE_ONLINE) + ccw_device_kill_io(cdev); + else { + terminate_internal_io(sch, cdev); + /* Re-start path verification. */ + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + } + } else + /* trigger path verification. 
*/ + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + +} + +static int io_subchannel_chp_event(struct subchannel *sch, + struct chp_link *link, int event) +{ + int mask; + + mask = chp_ssd_get_mask(&sch->ssd_info, link); + if (!mask) + return 0; + switch (event) { + case CHP_VARY_OFF: + sch->opm &= ~mask; + sch->lpm &= ~mask; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_VARY_ON: + sch->opm |= mask; + sch->lpm |= mask; + io_subchannel_verify(sch); + break; + case CHP_OFFLINE: + if (stsch(sch->schid, &sch->schib)) + return -ENXIO; + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_ONLINE: + if (stsch(sch->schid, &sch->schib)) + return -ENXIO; + sch->lpm |= mask & sch->opm; + io_subchannel_verify(sch); + break; + } + return 0; +} + static void io_subchannel_shutdown(struct subchannel *sch) { @@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch) cio_disable_subchannel(sch); } +static int io_subchannel_get_status(struct subchannel *sch) +{ + struct schib schib; + + if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) + return CIO_GONE; + if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) + return CIO_REVALIDATE; + if (!sch->lpm) + return CIO_NO_PATH; + return CIO_OPER; +} + +static int device_is_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return 0; + return (cdev->private->state == DEV_STATE_DISCONNECTED || + cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); +} + +static int recovery_check(struct device *dev, void *data) +{ + struct ccw_device *cdev = to_ccwdev(dev); + int *redo = data; + + spin_lock_irq(cdev->ccwlock); + switch (cdev->private->state) { + case DEV_STATE_DISCONNECTED: + CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + *redo = 1; + break; + case DEV_STATE_DISCONNECTED_SENSE_ID: + *redo = 1; + break; + } + spin_unlock_irq(cdev->ccwlock); + + return 0; +} + +static void recovery_work_func(struct work_struct *unused) +{ + int redo = 0; + + bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); + if (redo) { + spin_lock_irq(&recovery_lock); + if (!timer_pending(&recovery_timer)) { + if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) + recovery_phase++; + mod_timer(&recovery_timer, jiffies + + recovery_delay[recovery_phase] * HZ); + } + spin_unlock_irq(&recovery_lock); + } else + CIO_MSG_EVENT(4, "recovery: end\n"); +} + +static DECLARE_WORK(recovery_work, recovery_work_func); + +static void recovery_func(unsigned long data) +{ + /* + * We can't do our recovery in softirq context and it's not + * performance critical, so we schedule it. 
+ */ + schedule_work(&recovery_work); +} + +static void ccw_device_schedule_recovery(void) +{ + unsigned long flags; + + CIO_MSG_EVENT(4, "recovery: schedule\n"); + spin_lock_irqsave(&recovery_lock, flags); + if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { + recovery_phase = 0; + mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); + } + spin_unlock_irqrestore(&recovery_lock, flags); +} + +static void device_set_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return; + ccw_device_set_timeout(cdev, 0); + cdev->private->flags.fake_irb = 0; + cdev->private->state = DEV_STATE_DISCONNECTED; + if (cdev->online) + ccw_device_schedule_recovery(); +} + +static int io_subchannel_sch_event(struct subchannel *sch, int slow) +{ + int event, ret, disc; + unsigned long flags; + enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; + struct ccw_device *cdev; + + spin_lock_irqsave(sch->lock, flags); + cdev = sch_get_cdev(sch); + disc = device_is_disconnected(cdev); + if (disc && slow) { + /* Disconnected devices are evaluated directly only.*/ + spin_unlock_irqrestore(sch->lock, flags); + return 0; + } + /* No interrupt after machine check - kill pending timers. */ + if (cdev) + ccw_device_set_timeout(cdev, 0); + if (!disc && !slow) { + /* Non-disconnected devices are evaluated on the slow path. */ + spin_unlock_irqrestore(sch->lock, flags); + return -EAGAIN; + } + event = io_subchannel_get_status(sch); + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", + sch->schid.ssid, sch->schid.sch_no, event, + disc ? "disconnected" : "normal", + slow ? "slow" : "fast"); + /* Analyze subchannel status. */ + action = NONE; + switch (event) { + case CIO_NO_PATH: + if (disc) { + /* Check if paths have become available. */ + action = REPROBE; + break; + } + /* fall through */ + case CIO_GONE: + /* Prevent unwanted effects when opening lock. */ + cio_disable_subchannel(sch); + device_set_disconnected(cdev); + /* Ask driver what to do with device. */ + action = UNREGISTER; + spin_unlock_irqrestore(sch->lock, flags); + ret = io_subchannel_notify(sch, event); + spin_lock_irqsave(sch->lock, flags); + if (ret) + action = NONE; + break; + case CIO_REVALIDATE: + /* Device will be removed, so no notify necessary. */ + if (disc) + /* Reprobe because immediate unregister might block. */ + action = REPROBE; + else + action = UNREGISTER_PROBE; + break; + case CIO_OPER: + if (disc) + /* Get device operational again. */ + action = REPROBE; + break; + } + /* Perform action. */ + ret = 0; + switch (action) { + case UNREGISTER: + case UNREGISTER_PROBE: + /* Unregister device (will use subchannel lock). */ + spin_unlock_irqrestore(sch->lock, flags); + css_sch_device_unregister(sch); + spin_lock_irqsave(sch->lock, flags); + + /* Reset intparm to zeroes. */ + sch->schib.pmcw.intparm = 0; + cio_modify(sch); + break; + case REPROBE: + ccw_device_trigger_reprobe(cdev); + break; + default: + break; + } + spin_unlock_irqrestore(sch->lock, flags); + /* Probe if necessary. 
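recovery_work_func() above re-arms the recovery timer with a growing delay as long as disconnected devices remain. A compressed userspace sketch of that escalation; the delay values below are made up, since the kernel's recovery_delay[] table is defined elsewhere in this file and not shown in this hunk:

#include <stdio.h>

static const unsigned long recovery_delay[] = { 3, 30, 300 };	/* seconds, invented */

int main(void)
{
	unsigned int phase = 0;
	int round;

	for (round = 0; round < 5; round++) {
		printf("verification round %d failed, next retry in %lus\n",
		       round, recovery_delay[phase]);
		/* As in recovery_work_func(): back off until the last phase. */
		if (phase < sizeof(recovery_delay) / sizeof(recovery_delay[0]) - 1)
			phase++;
	}
	return 0;
}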
*/ + if (action == UNREGISTER_PROBE) + ret = css_probe_device(sch->schid); + + return ret; +} + #ifdef CONFIG_CCW_CONSOLE static struct ccw_device console_cdev; static struct ccw_device_private console_private; @@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void) return &ccw_console_lock; } -static int -ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) +static int ccw_device_console_enable(struct ccw_device *cdev, + struct subchannel *sch) { int rc; /* Attach subchannel private data. */ sch->private = cio_get_console_priv(); memset(sch->private, 0, sizeof(struct io_subchannel_private)); + io_subchannel_init_fields(sch); + sch->driver = &io_subchannel_driver; /* Initialize the ccw_device structure. */ cdev->dev.parent= &sch->dev; rc = io_subchannel_recog(cdev, sch); @@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev) return sch->schid; } -static int recovery_check(struct device *dev, void *data) -{ - struct ccw_device *cdev = to_ccwdev(dev); - int *redo = data; - - spin_lock_irq(cdev->ccwlock); - switch (cdev->private->state) { - case DEV_STATE_DISCONNECTED: - CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n", - cdev->private->dev_id.ssid, - cdev->private->dev_id.devno); - dev_fsm_event(cdev, DEV_EVENT_VERIFY); - *redo = 1; - break; - case DEV_STATE_DISCONNECTED_SENSE_ID: - *redo = 1; - break; - } - spin_unlock_irq(cdev->ccwlock); - - return 0; -} - -static void recovery_work_func(struct work_struct *unused) -{ - int redo = 0; - - bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); - if (redo) { - spin_lock_irq(&recovery_lock); - if (!timer_pending(&recovery_timer)) { - if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) - recovery_phase++; - mod_timer(&recovery_timer, jiffies + - recovery_delay[recovery_phase] * HZ); - } - spin_unlock_irq(&recovery_lock); - } else - CIO_MSG_EVENT(4, "recovery: end\n"); -} - -static DECLARE_WORK(recovery_work, recovery_work_func); - -static void recovery_func(unsigned long data) -{ - /* - * We can't do our recovery in softirq context and it's not - * performance critical, so we schedule it. - */ - schedule_work(&recovery_work); -} - -void ccw_device_schedule_recovery(void) -{ - unsigned long flags; - - CIO_MSG_EVENT(4, "recovery: schedule\n"); - spin_lock_irqsave(&recovery_lock, flags); - if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { - recovery_phase = 0; - mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); - } - spin_unlock_irqrestore(&recovery_lock, flags); -} - MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index cb08092be39..9800a8335a3 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *); int ccw_device_online(struct ccw_device *); int ccw_device_offline(struct ccw_device *); -void ccw_device_schedule_recovery(void); - /* Function prototypes for device status and basic sense stuff. */ void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); @@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *); int ccw_device_stlck(struct ccw_device *); +/* Helper function for machine check handling. 
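io_subchannel_sch_event() above is essentially a decision table from (subchannel status, disconnected?) to an action. A self-contained restatement of just that table; locking, the driver notify callback that can veto an unregister, and the actual unregister/reprobe work are deliberately left out:

#include <stdio.h>

enum sch_event { CIO_GONE, CIO_NO_PATH, CIO_OPER, CIO_REVALIDATE };
enum sch_action { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE };

static enum sch_action pick_action(enum sch_event event, int disconnected)
{
	switch (event) {
	case CIO_NO_PATH:
		if (disconnected)
			return REPROBE;		/* paths may have come back */
		/* fall through */
	case CIO_GONE:
		return UNREGISTER;		/* device vanished */
	case CIO_REVALIDATE:
		/* immediate unregister might block for disconnected devices */
		return disconnected ? REPROBE : UNREGISTER_PROBE;
	case CIO_OPER:
		return disconnected ? REPROBE : NONE;
	}
	return NONE;
}

int main(void)
{
	printf("%d\n", pick_action(CIO_REVALIDATE, 0));	/* UNREGISTER_PROBE */
	printf("%d\n", pick_action(CIO_OPER, 1));	/* REPROBE */
	return 0;
}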
*/ +void ccw_device_trigger_reprobe(struct ccw_device *); +void ccw_device_kill_io(struct ccw_device *); +int ccw_device_notify(struct ccw_device *, int); + /* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e268d5a77c1..8b5fe57fb2f 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -2,8 +2,7 @@ * drivers/s390/cio/device_fsm.c * finite state machine for device handling * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002,2008 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ @@ -27,65 +26,6 @@ static int timeout_log_enabled; -int -device_is_online(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return 0; - return (cdev->private->state == DEV_STATE_ONLINE); -} - -int -device_is_disconnected(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return 0; - return (cdev->private->state == DEV_STATE_DISCONNECTED || - cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); -} - -void -device_set_disconnected(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - ccw_device_set_timeout(cdev, 0); - cdev->private->flags.fake_irb = 0; - cdev->private->state = DEV_STATE_DISCONNECTED; - if (cdev->online) - ccw_device_schedule_recovery(); -} - -void device_set_intretry(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - cdev->private->flags.intretry = 1; -} - -int device_trigger_verify(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev || !cdev->online) - return -EINVAL; - dev_fsm_event(cdev, DEV_EVENT_VERIFY); - return 0; -} - static int __init ccw_timeout_log_setup(char *unused) { timeout_log_enabled = 1; @@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev) struct schib schib; struct subchannel *sch; struct io_subchannel_private *private; + union orb *orb; int cc; sch = to_subchannel(cdev->dev.parent); private = to_io_private(sch); + orb = &private->orb; cc = stsch(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " "device information:\n", get_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - &private->orb, sizeof(private->orb), 0); + orb, sizeof(*orb), 0); printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); - if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || - (void *)(addr_t)private->orb.cpa == cdev->private->iccws) - printk(KERN_WARNING "cio: last channel program (intern):\n"); - else - printk(KERN_WARNING "cio: last channel program:\n"); - - print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - (void *)(addr_t)private->orb.cpa, - sizeof(struct ccw1), 0); + if (orb->tm.b) { + printk(KERN_WARNING "cio: orb indicates transport mode\n"); + printk(KERN_WARNING "cio: last tcw:\n"); + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->tm.tcw, + sizeof(struct tcw), 0); + 
} else { + printk(KERN_WARNING "cio: orb indicates command mode\n"); + if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || + (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) + printk(KERN_WARNING "cio: last channel program " + "(intern):\n"); + else + printk(KERN_WARNING "cio: last channel program:\n"); + + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->cmd.cpa, + sizeof(struct ccw1), 0); + } printk(KERN_WARNING "cio: ccw device state: %d\n", cdev->private->state); printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); @@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) add_timer(&cdev->private->timer); } -/* Kill any pending timers after machine check. */ -void -device_kill_pending_timer(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - ccw_device_set_timeout(cdev, 0); -} - /* * Cancel running i/o. This is called repeatedly since halt/clear are * asynchronous operations. We do one try with cio_cancel, two tries @@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) /* Not operational -> done. */ return 0; /* Stage 1: cancel io. */ - if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && - !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { - ret = cio_cancel(sch); - if (ret != -EINVAL) - return ret; - /* cancel io unsuccessful. From now on it is asynchronous. */ + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { + if (!scsw_is_tm(&sch->schib.scsw)) { + ret = cio_cancel(sch); + if (ret != -EINVAL) + return ret; + } + /* cancel io unsuccessful or not applicable (transport mode). + * Continue with asynchronous instructions. */ cdev->private->iretry = 3; /* 3 halt retries. */ } - if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { /* Stage 2: halt io. */ if (cdev->private->iretry) { cdev->private->iretry--; @@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } } +int ccw_device_notify(struct ccw_device *cdev, int event) +{ + if (!cdev->drv) + return 0; + if (!cdev->online) + return 0; + return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; +} + static void ccw_device_oper_notify(struct work_struct *work) { struct ccw_device_private *priv; struct ccw_device *cdev; - struct subchannel *sch; int ret; - unsigned long flags; priv = container_of(work, struct ccw_device_private, kick_work); cdev = priv->cdev; - spin_lock_irqsave(cdev->ccwlock, flags); - sch = to_subchannel(cdev->dev.parent); - if (sch->driver && sch->driver->notify) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - ret = sch->driver->notify(sch, CIO_OPER); - spin_lock_irqsave(cdev->ccwlock, flags); - } else - ret = 0; + ret = ccw_device_notify(cdev, CIO_OPER); if (ret) { /* Reenable channel measurements, if needed. */ - spin_unlock_irqrestore(cdev->ccwlock, flags); cmf_reenable(cdev); - spin_lock_irqsave(cdev->ccwlock, flags); wake_up(&cdev->private->wait_q); - } - spin_unlock_irqrestore(cdev->ccwlock, flags); - if (!ret) + } else /* Driver doesn't want device back. */ ccw_device_do_unreg_rereg(work); } @@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) /* Deliver fake irb to device driver, if needed. 
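ccw_device_cancel_halt_clear() above escalates from cancel (a command-mode instruction only) through halt to clear, and is re-entered from the interrupt handler between stages. The sketch below collapses that into a synchronous loop purely for illustration; only the command-mode gate and the three-halt budget are taken from the hunk, and the stub return values are arbitrary:

#include <stdio.h>

enum { MODE_COMMAND, MODE_TRANSPORT };

/* Stubs standing in for the cancel/halt/clear hardware calls; the return
 * values just drive the example. */
static int try_cancel(void) { return -1; }
static int try_halt(void)   { return -1; }
static int try_clear(void)  { return 0; }

static int cancel_halt_clear(int mode)
{
	int halt_retries = 3;	/* same budget as in the hunk */

	/* Stage 1: cancel only applies to command-mode I/O. */
	if (mode == MODE_COMMAND && try_cancel() == 0)
		return 0;
	/* Stage 2: halt, retried a few times. */
	while (halt_retries--)
		if (try_halt() == 0)
			return 0;
	/* Stage 3: clear as the last resort. */
	return try_clear();
}

int main(void)
{
	printf("transport-mode request ended with %d\n",
	       cancel_halt_clear(MODE_TRANSPORT));
	return 0;
}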
*/ if (cdev->private->flags.fake_irb) { memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw.cc = 1; - cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; - cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; - cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; + cdev->private->irb.scsw.cmd.cc = 1; + cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; + cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; + cdev->private->irb.scsw.cmd.stctl = + SCSW_STCTL_STATUS_PEND; cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, @@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) { - if (sch->schib.scsw.actl != 0) - return -EBUSY; - return -EINVAL; - } - if (sch->schib.scsw.actl != 0) + if (scsw_actl(&sch->schib.scsw) != 0) return -EBUSY; + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; /* Are we doing path grouping? */ if (!cdev->private->options.pgroup) { /* No, set state offline immediately. */ @@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) */ stsch(sch->schid, &sch->schib); - if (sch->schib.scsw.actl != 0 || - (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || - (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { + if (scsw_actl(&sch->schib.scsw) != 0 || + (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || + (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered * to the device driver. Can't do path verfication now, @@ -823,13 +760,13 @@ static void ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; + int is_cmd; irb = (struct irb *) __LC_IRB; + is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) - && (!irb->scsw.cc)) { - if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if (!scsw_is_solicited(&irb->scsw)) { + if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !irb->esw.esw0.erw.cons) { /* Unit check but no sense data. Need basic sense. */ if (ccw_device_do_sense(cdev, irb) != 0) @@ -848,7 +785,7 @@ call_handler_unsol: } /* Accumulate status and find out if a basic sense is needed. */ ccw_device_accumulate_irb(cdev, irb); - if (cdev->private->flags.dosense) { + if (is_cmd && cdev->private->flags.dosense) { if (ccw_device_do_sense(cdev, irb) == 0) { cdev->private->state = DEV_STATE_W4SENSE; } @@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; /* Check for unsolicited interrupt. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) + if (scsw_stctl(&irb->scsw) == + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { + if (scsw_cc(&irb->scsw) == 1) /* Basic sense hasn't started. Try again. */ ccw_device_do_sense(cdev, irb); else { @@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) * only deliver the halt/clear interrupt to the device driver as if it * had killed the original request. */ - if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { + if (scsw_fctl(&irb->scsw) & + (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { /* Retry Basic Sense if requested. 
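Much of this series replaces direct irb->scsw.<field> accesses with scsw_*() helpers that work for both command- and transport-mode SCSWs. The toy below shows the accessor pattern only; the real union scsw layout and the way the two formats are distinguished live in the s390 headers, so an explicit flag stands in for that here:

#include <stdio.h>

/* Simplified stand-ins; the real command-/transport-mode SCSW layouts and
 * the real discriminator are defined by the architecture headers. */
struct cmd_scsw { unsigned char fctl, actl, stctl, cc; };
struct tm_scsw  { unsigned char fctl, actl, stctl, cc; };

struct irb_model {
	int tm_mode;			/* assumed explicit mode flag */
	union {
		struct cmd_scsw cmd;
		struct tm_scsw tm;
	} scsw;
};

static int scsw_is_tm_m(const struct irb_model *irb)
{
	return irb->tm_mode;
}

static unsigned char scsw_actl_m(const struct irb_model *irb)
{
	return scsw_is_tm_m(irb) ? irb->scsw.tm.actl : irb->scsw.cmd.actl;
}

int main(void)
{
	struct irb_model irb = { .tm_mode = 0 };

	irb.scsw.cmd.actl = 0x20;
	/* Common code calls the accessor instead of poking irb.scsw.<field>,
	 * so the same code path serves either I/O mode. */
	printf("actl=%02x\n", scsw_actl_m(&irb));
	return 0;
}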
*/ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ERR_PTR(-EIO)); } -void device_kill_io(struct subchannel *sch) +void ccw_device_kill_io(struct ccw_device *cdev) { int ret; - struct ccw_device *cdev; - cdev = sch_get_cdev(sch); ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); @@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event) case DEV_EVENT_INTERRUPT: irb = (struct irb *) __LC_IRB; /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == + if ((scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc)) + (!scsw_cc(&irb->scsw))) /* FIXME: we should restart stlck here, but this * is extremely unlikely ... */ goto out_wakeup; @@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_sense_id_start(cdev); } -void -device_trigger_reprobe(struct subchannel *sch) +void ccw_device_trigger_reprobe(struct ccw_device *cdev) { - struct ccw_device *cdev; + struct subchannel *sch; - cdev = sch_get_cdev(sch); - if (!cdev) - return; if (cdev->private->state != DEV_STATE_DISCONNECTED) return; + sch = to_subchannel(cdev->dev.parent); /* Update some values. */ if (stsch(sch->schid, &sch->schib)) return; @@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch) sch->schib.pmcw.ena = 0; if ((sch->lpm & (sch->lpm - 1)) != 0) sch->schib.pmcw.mp = 1; - sch->schib.pmcw.intparm = (u32)(addr_t)sch; /* We should also udate ssd info, but this has to wait. */ /* Check if this is another device which appeared on the same sch. */ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index cba7020517e..1bdaa614e34 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) irb = &cdev->private->irb; /* Check the error cases. */ - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Sense ID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { u8 lpm; - lpm = to_io_private(sch)->orb.lpm; + lpm = to_io_private(sch)->orb.cmd.lpm; if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " "on subchannel 0.%x.%04x is " @@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev) } /* Did we get a proper answer ? 
*/ - if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && + if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && cdev->private->senseid.reserved == 0xFF) { - if (irb->scsw.count < sizeof(struct senseid) - 8) + if (irb->scsw.cmd.count < sizeof(struct senseid) - 8) cdev->private->flags.esid = 1; return 0; /* Success */ } @@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) "subchannel 0.%x.%04x returns status %02X%02X\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, - irb->scsw.dstat, irb->scsw.cstat); + irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); return -EAGAIN; } @@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); irb = (struct irb *) __LC_IRB; /* Retry sense id, if needed. */ - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if ((irb->scsw.cc == 1) || !irb->scsw.actl) { + if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { ret = __ccw_device_sense_id_start(cdev); if (ret && ret != -EBUSY) ccw_device_sense_id_done(cdev, ret); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f308ad55a6d..ee1a28310fb 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -17,6 +17,7 @@ #include <asm/ccwdev.h> #include <asm/idals.h> #include <asm/chpid.h> +#include <asm/fcx.h> #include "cio.h" #include "cio_debug.h" @@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, return -EBUSY; } if (cdev->private->state != DEV_STATE_ONLINE || - ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || + ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || cdev->private->flags.doverify) return -EBUSY; ret = cio_set_options (sch, flags); @@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev) if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE || - !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) + !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return -EINVAL; return cio_resume(sch); } @@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev) * - fast notification was requested (primary status) * - unsolicited interrupts */ - stctl = cdev->private->irb.scsw.stctl; + stctl = scsw_stctl(&cdev->private->irb.scsw); ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || (stctl == SCSW_STCTL_STATUS_PEND); @@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev) cio_disable_subchannel(sch); //FIXME: return code? goto out_unlock; } - cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; + cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; spin_unlock_irqrestore(sch->lock, flags); - wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); + wait_event(cdev->private->wait_q, + cdev->private->irb.scsw.cmd.actl == 0); spin_lock_irqsave(sch->lock, flags); cio_disable_subchannel(sch); //FIXME: return code? - if ((cdev->private->irb.scsw.dstat != + if ((cdev->private->irb.scsw.cmd.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || - (cdev->private->irb.scsw.cstat != 0)) + (cdev->private->irb.scsw.cmd.cstat != 0)) ret = -EIO; /* Clear irb. 
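ccw_device_call_handler() above decides whether an interrupt carries an ending status purely from the status-control bits. The same predicate, restated stand-alone with placeholder bit values so it compiles on its own:

#include <stdio.h>

#define SCSW_STCTL_STATUS_PEND	0x01	/* placeholder bit values */
#define SCSW_STCTL_SEC_STATUS	0x02
#define SCSW_STCTL_ALERT_STATUS	0x10

static int is_ending_status(unsigned char stctl)
{
	return (stctl & SCSW_STCTL_SEC_STATUS) ||
	       (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
	       (stctl == SCSW_STCTL_STATUS_PEND);
}

int main(void)
{
	printf("%d\n", is_ending_status(SCSW_STCTL_STATUS_PEND));	/* 1 */
	printf("%d\n", is_ending_status(SCSW_STCTL_ALERT_STATUS));	/* 0 */
	return 0;
}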
*/ memset(&cdev->private->irb, 0, sizeof(struct irb)); @@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) } EXPORT_SYMBOL(ccw_device_get_id); +/** + * ccw_device_tm_start_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) +{ + struct subchannel *sch; + int rc; + + sch = to_subchannel(cdev->dev.parent); + if (cdev->private->state != DEV_STATE_ONLINE) + return -EIO; + /* Adjust requested path mask to excluded varied off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } + rc = cio_tm_start_key(sch, tcw, lpm, key); + if (rc == 0) + cdev->private->intparm = intparm; + return rc; +} +EXPORT_SYMBOL(ccw_device_tm_start_key); + +/** + * ccw_device_tm_start_timeout_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) +{ + int ret; + + ccw_device_set_timeout(cdev, expires); + ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); + if (ret != 0) + ccw_device_set_timeout(cdev, 0); + return ret; +} +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); + +/** + * ccw_device_tm_start - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm) +{ + return ccw_device_tm_start_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY); +} +EXPORT_SYMBOL(ccw_device_tm_start); + +/** + * ccw_device_tm_start_timeout - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, int expires) +{ + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY, expires); +} +EXPORT_SYMBOL(ccw_device_tm_start_timeout); + +/** + * ccw_device_tm_intrg - perform interrogate function + * @cdev: ccw device on which to perform the interrogate function + * + * Perform an interrogate function on the given ccw device. 
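The path-mask handling in ccw_device_tm_start_key() is small but easy to miss: a non-zero caller-supplied mask is intersected with the operational path mask, and the start is refused if nothing remains. A stand-alone restatement with invented names:

#include <stdio.h>
#include <errno.h>

static int start_on_paths(unsigned char requested_lpm, unsigned char opm)
{
	if (requested_lpm) {
		requested_lpm &= opm;		/* drop varied-off paths */
		if (!requested_lpm)
			return -EACCES;		/* nothing usable left */
	}
	/* ...the masked value would be handed to the subchannel start... */
	return 0;
}

int main(void)
{
	printf("%d\n", start_on_paths(0x80, 0x40));	/* -EACCES */
	printf("%d\n", start_on_paths(0x00, 0x40));	/* 0: no restriction */
	return 0;
}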
Return zero on + * success, non-zero otherwise. + */ +int ccw_device_tm_intrg(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EIO; + if (!scsw_is_tm(&sch->schib.scsw) || + !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND)) + return -EINVAL; + return cio_tm_intrg(sch); +} +EXPORT_SYMBOL(ccw_device_tm_intrg); + // FIXME: these have to go: int diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 5cf7be008e9..86bc94eb607 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -28,13 +28,13 @@ * Helper function called from interrupt context to decide whether an * operation should be tried again. */ -static int __ccw_device_should_retry(struct scsw *scsw) +static int __ccw_device_should_retry(union scsw *scsw) { /* CC is only valid if start function bit is set. */ - if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) + if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) return 1; /* No more activity. For sense and set PGID we stubbornly try again. */ - if (!scsw->actl) + if (!scsw->cmd.actl) return 1; return 0; } @@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Sense PGID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { u8 lpm; - lpm = to_io_private(sch)->orb.lpm; + lpm = to_io_private(sch)->orb.cmd.lpm; CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) { ret = __ccw_device_sense_pgid_start(cdev); @@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Set PGID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry NOP if requested. 
*/ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) } return -ETIME; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_verify_start(cdev); @@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_disband_start(cdev); diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 4a38993000f..1b03c5423be 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -29,9 +29,11 @@ static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { - if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | - SCHN_STAT_CHN_CTRL_CHK | - SCHN_STAT_INTF_CTRL_CHK))) + char dbf_text[15]; + + if (!scsw_is_valid_cstat(&irb->scsw) || + !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK | + SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) return; CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " "received" @@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) ": %02X sch_stat : %02X\n", cdev->private->dev_id.devno, cdev->private->schid.ssid, cdev->private->schid.sch_no, - irb->scsw.dstat, irb->scsw.cstat); - - if (irb->scsw.cc != 3) { - char dbf_text[15]; - - sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); - CIO_TRACE_EVENT(0, dbf_text); - CIO_HEX_EVENT(0, irb, sizeof (struct irb)); - } + scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); + sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + CIO_TRACE_EVENT(0, dbf_text); + CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } /* @@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) * are condition that have to be met for the extended control * bit to have meaning. Sick. */ - cdev->private->irb.scsw.ectl = 0; - if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && - !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) - cdev->private->irb.scsw.ectl = irb->scsw.ectl; + cdev->private->irb.scsw.cmd.ectl = 0; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && + !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) + cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; /* Check if extended control word is valid. */ - if (!cdev->private->irb.scsw.ectl) + if (!cdev->private->irb.scsw.cmd.ectl) return; /* Copy concurrent sense / model dependent information. 
*/ memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); @@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) static int ccw_device_accumulate_esw_valid(struct irb *irb) { - if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) + if (!irb->scsw.cmd.eswf && + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND)) return 0; - if (irb->scsw.stctl == - (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && - !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) + if (irb->scsw.cmd.stctl == + (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && + !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return 0; return 1; } @@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; /* Copy subchannel logout information if esw is of format 0. */ - if (irb->scsw.eswf) { + if (irb->scsw.cmd.eswf) { cdev_sublog = &cdev_irb->esw.esw0.sublog; sublog = &irb->esw.esw0.sublog; /* Copy extended status flags. */ @@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) * Copy fields that have a meaning for channel data check * channel control check and interface control check. */ - if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | + if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)) { /* Copy ancillary report bit. */ @@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) /* Copy i/o-error alert. */ cdev_sublog->ioerr = sublog->ioerr; /* Copy channel path timeout bit. */ - if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) + if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK) cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; /* Copy failing storage address validity flag. */ cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; @@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bit have no meaning and we must ignore them. * The esw is not meaningful as well... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - + /* No irb accumulation for transport mode irbs. */ + if (scsw_is_tm(&irb->scsw)) { + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); + return; + } /* * Don't accumulate unsolicited interrupts. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc)) + if (!scsw_is_solicited(&irb->scsw)) return; cdev_irb = &cdev->private->irb; @@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * status at the subchannel has been cleared and we must not pass * intermediate accumulated status to the device driver. */ - if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Copy bits which are valid only for the start function. */ - if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { + if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { /* Copy key. 
*/ - cdev_irb->scsw.key = irb->scsw.key; + cdev_irb->scsw.cmd.key = irb->scsw.cmd.key; /* Copy suspend control bit. */ - cdev_irb->scsw.sctl = irb->scsw.sctl; + cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl; /* Accumulate deferred condition code. */ - cdev_irb->scsw.cc |= irb->scsw.cc; + cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc; /* Copy ccw format bit. */ - cdev_irb->scsw.fmt = irb->scsw.fmt; + cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt; /* Copy prefetch bit. */ - cdev_irb->scsw.pfch = irb->scsw.pfch; + cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch; /* Copy initial-status-interruption-control. */ - cdev_irb->scsw.isic = irb->scsw.isic; + cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic; /* Copy address limit checking control. */ - cdev_irb->scsw.alcc = irb->scsw.alcc; + cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc; /* Copy suppress suspend bit. */ - cdev_irb->scsw.ssi = irb->scsw.ssi; + cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi; } /* Take care of the extended control bit and extended control word. */ ccw_device_accumulate_ecw(cdev, irb); /* Accumulate function control. */ - cdev_irb->scsw.fctl |= irb->scsw.fctl; + cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl; /* Copy activity control. */ - cdev_irb->scsw.actl= irb->scsw.actl; + cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl; /* Accumulate status control. */ - cdev_irb->scsw.stctl |= irb->scsw.stctl; + cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl; /* * Copy ccw address if it is valid. This is a bit simplified * but should be close enough for all practical purposes. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || - ((irb->scsw.stctl == + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) || + ((irb->scsw.cmd.stctl == (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && - (irb->scsw.actl & SCSW_ACTL_DEVACT) && - (irb->scsw.actl & SCSW_ACTL_SCHACT)) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) - cdev_irb->scsw.cpa = irb->scsw.cpa; + (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) && + (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) || + (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) + cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa; /* Accumulate device status, but not the device busy flag. */ - cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; + cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY; /* dstat is not always valid. */ - if (irb->scsw.stctl & + if (irb->scsw.cmd.stctl & (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) - cdev_irb->scsw.dstat |= irb->scsw.dstat; + cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat; /* Accumulate subchannel status. */ - cdev_irb->scsw.cstat |= irb->scsw.cstat; + cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat; /* Copy residual count if it is valid. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) - cdev_irb->scsw.count = irb->scsw.count; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) + == 0) + cdev_irb->scsw.cmd.count = irb->scsw.cmd.count; /* Take care of bits in the extended status word. */ ccw_device_accumulate_esw(cdev, irb); @@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * sense facility available/supported when enabling the * concurrent sense facility. 
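The accumulation rules in ccw_device_accumulate_irb() follow a simple pattern: some control fields are OR-accumulated, activity control is copied, and the device-status byte is merged only when a status-control bit says it is valid. A reduced model with placeholder field widths and bit values:

#include <stdio.h>

struct scsw_m { unsigned char fctl, actl, stctl, dstat, cstat; };

#define STCTL_STATUS_MASK 0x1e	/* placeholder for PRIM|SEC|INTER|ALERT */
#define DEV_STAT_BUSY	  0x10	/* placeholder */

static void accumulate(struct scsw_m *acc, const struct scsw_m *irb)
{
	acc->fctl  |= irb->fctl;	/* function control accumulates */
	acc->actl   = irb->actl;	/* activity control is copied */
	acc->stctl |= irb->stctl;	/* status control accumulates */
	acc->dstat &= ~DEV_STAT_BUSY;	/* the busy flag is never kept */
	if (irb->stctl & STCTL_STATUS_MASK)
		acc->dstat |= irb->dstat;	/* dstat valid only with status */
	acc->cstat |= irb->cstat;	/* subchannel status accumulates */
}

int main(void)
{
	struct scsw_m acc = { 0 };
	struct scsw_m inter = { .fctl = 0x40, .actl = 0x04, .stctl = 0x09 };
	struct scsw_m final = { .fctl = 0x40, .stctl = 0x05, .dstat = 0x0c };

	accumulate(&acc, &inter);
	accumulate(&acc, &final);
	printf("fctl=%02x stctl=%02x dstat=%02x\n", acc.fctl, acc.stctl, acc.dstat);
	return 0;
}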
*/ - if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !(cdev_irb->esw.esw0.erw.cons)) cdev->private->flags.dosense = 1; } @@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) sch = to_subchannel(cdev->dev.parent); /* A sense is required, can we do it now ? */ - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) /* * we received an Unit Check but we have no final * status yet, therefore we must delay the SENSE @@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bit have no meaning and we must ignore them. * The esw is not meaningful as well... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && - (irb->scsw.dstat & DEV_STAT_CHN_END)) { + if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; } @@ -386,11 +383,11 @@ int ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) { ccw_device_accumulate_irb(cdev, irb); - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) return -EBUSY; /* Check for basic sense. */ if (cdev->private->flags.dosense && - !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { + !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; return 0; diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c new file mode 100644 index 00000000000..61677dfbdc9 --- /dev/null +++ b/drivers/s390/cio/fcx.c @@ -0,0 +1,350 @@ +/* + * Functions for assembling fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <asm/fcx.h> +#include "cio.h" + +/** + * tcw_get_intrg - return pointer to associated interrogate tcw + * @tcw: pointer to the original tcw + * + * Return a pointer to the interrogate tcw associated with the specified tcw + * or %NULL if there is no associated interrogate tcw. + */ +struct tcw *tcw_get_intrg(struct tcw *tcw) +{ + return (struct tcw *) ((addr_t) tcw->intrg); +} +EXPORT_SYMBOL(tcw_get_intrg); + +/** + * tcw_get_data - return pointer to input/output data associated with tcw + * @tcw: pointer to the tcw + * + * Return the input or output data address specified in the tcw depending + * on whether the r-bit or the w-bit is set. If neither bit is set, return + * %NULL. 
+ */ +void *tcw_get_data(struct tcw *tcw) +{ + if (tcw->r) + return (void *) ((addr_t) tcw->input); + if (tcw->w) + return (void *) ((addr_t) tcw->output); + return NULL; +} +EXPORT_SYMBOL(tcw_get_data); + +/** + * tcw_get_tccb - return pointer to tccb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tccb associated with this tcw. + */ +struct tccb *tcw_get_tccb(struct tcw *tcw) +{ + return (struct tccb *) ((addr_t) tcw->tccb); +} +EXPORT_SYMBOL(tcw_get_tccb); + +/** + * tcw_get_tsb - return pointer to tsb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tsb associated with this tcw. + */ +struct tsb *tcw_get_tsb(struct tcw *tcw) +{ + return (struct tsb *) ((addr_t) tcw->tsb); +} +EXPORT_SYMBOL(tcw_get_tsb); + +/** + * tcw_init - initialize tcw data structure + * @tcw: pointer to the tcw to be initialized + * @r: initial value of the r-bit + * @w: initial value of the w-bit + * + * Initialize all fields of the specified tcw data structure with zero and + * fill in the format, flags, r and w fields. + */ +void tcw_init(struct tcw *tcw, int r, int w) +{ + memset(tcw, 0, sizeof(struct tcw)); + tcw->format = TCW_FORMAT_DEFAULT; + tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT); + if (r) + tcw->r = 1; + if (w) + tcw->w = 1; +} +EXPORT_SYMBOL(tcw_init); + +static inline size_t tca_size(struct tccb *tccb) +{ + return tccb->tcah.tcal - 12; +} + +static u32 calc_dcw_count(struct tccb *tccb) +{ + int offset; + struct dcw *dcw; + u32 count = 0; + size_t size; + + size = tca_size(tccb); + for (offset = 0; offset < size;) { + dcw = (struct dcw *) &tccb->tca[offset]; + count += dcw->count; + if (!(dcw->flags & DCW_FLAGS_CC)) + break; + offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4); + } + return count; +} + +static u32 calc_cbc_size(struct tidaw *tidaw, int num) +{ + int i; + u32 cbc_data; + u32 cbc_count = 0; + u64 data_count = 0; + + for (i = 0; i < num; i++) { + if (tidaw[i].flags & TIDAW_FLAGS_LAST) + break; + /* TODO: find out if padding applies to total of data + * transferred or data transferred by this tidaw. Assumption: + * applies to total. */ + data_count += tidaw[i].count; + if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) { + cbc_data = 4 + ALIGN(data_count, 4) - data_count; + cbc_count += cbc_data; + data_count += cbc_data; + } + } + return cbc_count; +} + +/** + * tcw_finalize - finalize tcw length fields and tidaw list + * @tcw: pointer to the tcw + * @num_tidaws: the number of tidaws used to address input/output data or zero + * if no tida is used + * + * Calculate the input-/output-count and tccbl field in the tcw, add a + * tcat the tccb and terminate the data tidaw list if used. + * + * Note: in case input- or output-tida is used, the tidaw-list must be stored + * in contiguous storage (no ttic). The tcal field in the tccb must be + * up-to-date. + */ +void tcw_finalize(struct tcw *tcw, int num_tidaws) +{ + struct tidaw *tidaw; + struct tccb *tccb; + struct tccb_tcat *tcat; + u32 count; + + /* Terminate tidaw list. */ + tidaw = tcw_get_data(tcw); + if (num_tidaws > 0) + tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST; + /* Add tcat to tccb. */ + tccb = tcw_get_tccb(tcw); + tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; + memset(tcat, 0, sizeof(tcat)); + /* Calculate tcw input/output count and tcat transport count. 
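calc_dcw_count() above walks the chained DCWs in the tca, summing their data counts and skipping 4-byte-aligned control data until it reaches a dcw without the chain flag. The same walk over a simplified stand-in structure (the real struct dcw layout differs):

#include <stdio.h>
#include <string.h>

#define DCW_CHAIN 0x40			/* placeholder for DCW_FLAGS_CC */
#define ALIGN4(x) (((x) + 3u) & ~3u)

struct dcw_m {				/* simplified stand-in for struct dcw */
	unsigned char cmd, flags, cd_count;
	unsigned int count;
	/* cd_count bytes of control data would follow here */
};

static unsigned int dcw_total_count(const unsigned char *tca, size_t tca_len)
{
	unsigned int total = 0;
	size_t offset = 0;

	while (offset + sizeof(struct dcw_m) <= tca_len) {
		const struct dcw_m *dcw = (const void *)(tca + offset);

		total += dcw->count;
		if (!(dcw->flags & DCW_CHAIN))	/* stop at the unchained dcw */
			break;
		offset += sizeof(*dcw) + ALIGN4(dcw->cd_count);
	}
	return total;
}

int main(void)
{
	static union {
		unsigned char bytes[2 * sizeof(struct dcw_m)];
		struct dcw_m align;		/* for alignment only */
	} tca;
	struct dcw_m first = { .flags = DCW_CHAIN, .count = 8 };
	struct dcw_m second = { .flags = 0, .count = 64 };

	memcpy(tca.bytes, &first, sizeof(first));
	memcpy(tca.bytes + sizeof(first), &second, sizeof(second));
	printf("input/output count: %u\n",
	       dcw_total_count(tca.bytes, sizeof(tca.bytes)));	/* 72 */
	return 0;
}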
*/ + count = calc_dcw_count(tccb); + if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) + count += calc_cbc_size(tidaw, num_tidaws); + if (tcw->r) + tcw->input_count = count; + else if (tcw->w) + tcw->output_count = count; + tcat->count = ALIGN(count, 4) + 4; + /* Calculate tccbl. */ + tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) + + sizeof(struct tccb_tcat) - 20) >> 2; +} +EXPORT_SYMBOL(tcw_finalize); + +/** + * tcw_set_intrg - set the interrogate tcw address of a tcw + * @tcw: the tcw address + * @intrg_tcw: the address of the interrogate tcw + * + * Set the address of the interrogate tcw in the specified tcw. + */ +void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) +{ + tcw->intrg = (u32) ((addr_t) intrg_tcw); +} +EXPORT_SYMBOL(tcw_set_intrg); + +/** + * tcw_set_data - set data address and tida flag of a tcw + * @tcw: the tcw address + * @data: the data address + * @use_tidal: zero of the data address specifies a contiguous block of data, + * non-zero if it specifies a list if tidaws. + * + * Set the input/output data address of a tcw (depending on the value of the + * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag + * is set as well. + */ +void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) +{ + if (tcw->r) { + tcw->input = (u64) ((addr_t) data); + if (use_tidal) + tcw->flags |= TCW_FLAGS_INPUT_TIDA; + } else if (tcw->w) { + tcw->output = (u64) ((addr_t) data); + if (use_tidal) + tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; + } +} +EXPORT_SYMBOL(tcw_set_data); + +/** + * tcw_set_tccb - set tccb address of a tcw + * @tcw: the tcw address + * @tccb: the tccb address + * + * Set the address of the tccb in the specified tcw. + */ +void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) +{ + tcw->tccb = (u64) ((addr_t) tccb); +} +EXPORT_SYMBOL(tcw_set_tccb); + +/** + * tcw_set_tsb - set tsb address of a tcw + * @tcw: the tcw address + * @tsb: the tsb address + * + * Set the address of the tsb in the specified tcw. + */ +void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) +{ + tcw->tsb = (u64) ((addr_t) tsb); +} +EXPORT_SYMBOL(tcw_set_tsb); + +/** + * tccb_init - initialize tccb + * @tccb: the tccb address + * @size: the maximum size of the tccb + * @sac: the service-action-code to be user + * + * Initialize the header of the specified tccb by resetting all values to zero + * and filling in defaults for format, sac and initial tcal fields. + */ +void tccb_init(struct tccb *tccb, size_t size, u32 sac) +{ + memset(tccb, 0, size); + tccb->tcah.format = TCCB_FORMAT_DEFAULT; + tccb->tcah.sac = sac; + tccb->tcah.tcal = 12; +} +EXPORT_SYMBOL(tccb_init); + +/** + * tsb_init - initialize tsb + * @tsb: the tsb address + * + * Initialize the specified tsb by resetting all values to zero. + */ +void tsb_init(struct tsb *tsb) +{ + memset(tsb, 0, sizeof(tsb)); +} +EXPORT_SYMBOL(tsb_init); + +/** + * tccb_add_dcw - add a dcw to the tccb + * @tccb: the tccb address + * @tccb_size: the maximum tccb size + * @cmd: the dcw command + * @flags: flags for the dcw + * @cd: pointer to control data for this dcw or NULL if none is required + * @cd_count: number of control data bytes for this dcw + * @count: number of data bytes for this dcw + * + * Add a new dcw to the specified tccb by writing the dcw information specified + * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return + * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw + * would exceed the available space as defined by @tccb_size. 
+ * + * Note: the tcal field of the tccb header will be updates to reflect added + * content. + */ +struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, + void *cd, u8 cd_count, u32 count) +{ + struct dcw *dcw; + int size; + int tca_offset; + + /* Check for space. */ + tca_offset = tca_size(tccb); + size = ALIGN(sizeof(struct dcw) + cd_count, 4); + if (sizeof(struct tccb_tcah) + tca_offset + size + + sizeof(struct tccb_tcat) > tccb_size) + return ERR_PTR(-ENOSPC); + /* Add dcw to tca. */ + dcw = (struct dcw *) &tccb->tca[tca_offset]; + memset(dcw, 0, size); + dcw->cmd = cmd; + dcw->flags = flags; + dcw->count = count; + dcw->cd_count = cd_count; + if (cd) + memcpy(&dcw->cd[0], cd, cd_count); + tccb->tcah.tcal += size; + return dcw; +} +EXPORT_SYMBOL(tccb_add_dcw); + +/** + * tcw_add_tidaw - add a tidaw to a tcw + * @tcw: the tcw address + * @num_tidaws: the current number of tidaws + * @flags: flags for the new tidaw + * @addr: address value for the new tidaw + * @count: count value for the new tidaw + * + * Add a new tidaw to the input/output data tidaw-list of the specified tcw + * (depending on the value of the r-flag and w-flag) and return a pointer to + * the new tidaw. + * + * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller + * must ensure that there is enough space for the new tidaw. The last-tidaw + * flag for the last tidaw in the list will be set by tcw_finalize. + */ +struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, + void *addr, u32 count) +{ + struct tidaw *tidaw; + + /* Add tidaw to tidaw-list. */ + tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; + memset(tidaw, 0, sizeof(struct tidaw)); + tidaw->flags = flags; + tidaw->count = count; + tidaw->addr = (u64) ((addr_t) addr); + return tidaw; +} +EXPORT_SYMBOL(tcw_add_tidaw); diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 144466ab8c1..528065cb502 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h @@ -8,7 +8,7 @@ #ifndef S390_IDSET_H #define S390_IDSET_H S390_IDSET_H -#include "schid.h" +#include <asm/schid.h> struct idset; diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8c613160bfc..3f8f1cf69c7 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -1,12 +1,12 @@ #ifndef S390_IO_SCH_H #define S390_IO_SCH_H -#include "schid.h" +#include <asm/schid.h> /* - * operation request block + * command-mode operation request block */ -struct orb { +struct cmd_orb { u32 intparm; /* interruption parameter */ u32 key : 4; /* flags, like key, suspend control, etc. 
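tcw_add_tidaw() above appends entries to a contiguous tidaw array and leaves the terminating flag to tcw_finalize(). A minimal model of that split responsibility, with placeholder flag values and field sizes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TIDAW_LAST 0x80			/* placeholder for TIDAW_FLAGS_LAST */

struct tidaw_m {			/* simplified stand-in for struct tidaw */
	unsigned char flags;
	unsigned int count;
	unsigned long long addr;
};

static void add_tidaw(struct tidaw_m *list, int n, void *addr,
		      unsigned int count)
{
	memset(&list[n], 0, sizeof(list[n]));
	list[n].addr = (unsigned long long)(uintptr_t)addr;
	list[n].count = count;
}

static void finalize(struct tidaw_m *list, int n)
{
	if (n > 0)
		list[n - 1].flags |= TIDAW_LAST;	/* terminate the list */
}

int main(void)
{
	static char buf_a[20], buf_b[52];
	struct tidaw_m list[2];

	add_tidaw(list, 0, buf_a, sizeof(buf_a));
	add_tidaw(list, 1, buf_b, sizeof(buf_b));
	finalize(list, 2);
	printf("last flag set on entry 1: %d\n", !!(list[1].flags & TIDAW_LAST));
	return 0;
}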
*/ u32 spnd : 1; /* suspend control */ @@ -28,8 +28,36 @@ struct orb { u32 cpa; /* channel program address */ } __attribute__ ((packed, aligned(4))); +/* + * transport-mode operation request block + */ +struct tm_orb { + u32 intparm; + u32 key:4; + u32 :9; + u32 b:1; + u32 :2; + u32 lpm:8; + u32 :7; + u32 x:1; + u32 tcw; + u32 prio:8; + u32 :8; + u32 rsvpgm:8; + u32 :8; + u32 :32; + u32 :32; + u32 :32; + u32 :32; +} __attribute__ ((packed, aligned(4))); + +union orb { + struct cmd_orb cmd; + struct tm_orb tm; +} __attribute__ ((packed, aligned(4))); + struct io_subchannel_private { - struct orb orb; /* operation request block */ + union orb orb; /* operation request block */ struct ccw1 sense_ccw; /* static ccw for sense command */ } __attribute__ ((aligned(8))); @@ -95,16 +123,18 @@ struct ccw_device_private { void *cmb_wait; /* deferred cmb enable/disable */ }; -static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) +static inline int ssch(struct subchannel_id schid, volatile union orb *addr) { register struct subchannel_id reg1 asm("1") = schid; - int ccode; + int ccode = -EIO; asm volatile( " ssch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 652ea3625f9..9fa2ac13ac8 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -2,7 +2,7 @@ #define S390_CIO_IOASM_H #include <asm/chpid.h> -#include "schid.h" +#include <asm/schid.h> /* * TPI info structure diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c new file mode 100644 index 00000000000..c592087be0f --- /dev/null +++ b/drivers/s390/cio/isc.c @@ -0,0 +1,68 @@ +/* + * Functions for registration of I/O interruption subclasses on s390. + * + * Copyright IBM Corp. 2008 + * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/spinlock.h> +#include <linux/module.h> +#include <asm/isc.h> + +static unsigned int isc_refs[MAX_ISC + 1]; +static DEFINE_SPINLOCK(isc_ref_lock); + + +/** + * isc_register - register an I/O interruption subclass. + * @isc: I/O interruption subclass to register + * + * The number of users for @isc is increased. If this is the first user to + * register @isc, the corresponding I/O interruption subclass mask is enabled. + * + * Context: + * This function must not be called in interrupt context. + */ +void isc_register(unsigned int isc) +{ + if (isc > MAX_ISC) { + WARN_ON(1); + return; + } + + spin_lock(&isc_ref_lock); + if (isc_refs[isc] == 0) + ctl_set_bit(6, 31 - isc); + isc_refs[isc]++; + spin_unlock(&isc_ref_lock); +} +EXPORT_SYMBOL_GPL(isc_register); + +/** + * isc_unregister - unregister an I/O interruption subclass. + * @isc: I/O interruption subclass to unregister + * + * The number of users for @isc is decreased. If this is the last user to + * unregister @isc, the corresponding I/O interruption subclass mask is + * disabled. + * Note: This function must not be called if isc_register() hasn't been called + * before by the driver for @isc. + * + * Context: + * This function must not be called in interrupt context. 
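The isc_register()/isc_unregister() pair added in isc.c implements plain per-subclass reference counting: the first user switches the subclass mask bit on, the last user switches it off again. A userspace model in which an ordinary variable stands in for the control-register bit operations and the MAX_ISC value is assumed:

#include <stdio.h>

#define MAX_ISC 7	/* value assumed for the sketch */

static unsigned int isc_refs[MAX_ISC + 1];
static unsigned int isc_mask;	/* stands in for the I/O-subclass mask bits */

static void model_isc_register(unsigned int isc)
{
	if (isc > MAX_ISC)
		return;
	if (isc_refs[isc]++ == 0)
		isc_mask |= 1u << (31 - isc);	/* first user enables the subclass */
}

static void model_isc_unregister(unsigned int isc)
{
	if (isc > MAX_ISC || isc_refs[isc] == 0)
		return;
	if (--isc_refs[isc] == 0)
		isc_mask &= ~(1u << (31 - isc));	/* last user disables it */
}

int main(void)
{
	model_isc_register(3);
	model_isc_register(3);
	model_isc_unregister(3);
	printf("after first unregister: %08x\n", isc_mask);	/* bit still set */
	model_isc_unregister(3);
	printf("after last unregister:  %08x\n", isc_mask);	/* 00000000 */
	return 0;
}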
+ */ +void isc_unregister(unsigned int isc) +{ + spin_lock(&isc_ref_lock); + /* check for misuse */ + if (isc > MAX_ISC || isc_refs[isc] == 0) { + WARN_ON(1); + goto out_unlock; + } + if (isc_refs[isc] == 1) + ctl_clear_bit(6, 31 - isc); + isc_refs[isc]--; +out_unlock: + spin_unlock(&isc_ref_lock); +} +EXPORT_SYMBOL_GPL(isc_unregister); diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c new file mode 100644 index 00000000000..17da9ab932e --- /dev/null +++ b/drivers/s390/cio/itcw.c @@ -0,0 +1,327 @@ +/* + * Functions for incremental construction of fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <asm/fcx.h> +#include <asm/itcw.h> + +/** + * struct itcw - incremental tcw helper data type + * + * This structure serves as a handle for the incremental construction of a + * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate + * tcw and associated data. The data structures are contained inside a single + * contiguous buffer provided by the user. + * + * The itcw construction functions take care of overall data integrity: + * - reset unused fields to zero + * - fill in required pointers + * - ensure required alignment for data structures + * - prevent data structures to cross 4k-byte boundary where required + * - calculate tccb-related length fields + * - optionally provide ready-made interrogate tcw and associated structures + * + * Restrictions apply to the itcws created with these construction functions: + * - tida only supported for data address, not for tccb + * - only contiguous tidaw-lists (no ttic) + * - total number of bytes required per itcw may not exceed 4k bytes + * - either read or write operation (may not work with r=0 and w=0) + * + * Example: + * struct itcw *itcw; + * void *buffer; + * size_t size; + * + * size = itcw_calc_size(1, 2, 0); + * buffer = kmalloc(size, GFP_DMA); + * if (!buffer) + * return -ENOMEM; + * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); + * if (IS_ERR(itcw)) + * return PTR_ER(itcw); + * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); + * itcw_add_tidaw(itcw, 0, 0x30000, 20); + * itcw_add_tidaw(itcw, 0, 0x40000, 52); + * itcw_finalize(itcw); + * + */ +struct itcw { + struct tcw *tcw; + struct tcw *intrg_tcw; + int num_tidaws; + int max_tidaws; + int intrg_num_tidaws; + int intrg_max_tidaws; +}; + +/** + * itcw_get_tcw - return pointer to tcw associated with the itcw + * @itcw: address of the itcw + * + * Return pointer to the tcw associated with the itcw. + */ +struct tcw *itcw_get_tcw(struct itcw *itcw) +{ + return itcw->tcw; +} +EXPORT_SYMBOL(itcw_get_tcw); + +/** + * itcw_calc_size - return the size of an itcw with the given parameters + * @intrg: if non-zero, add an interrogate tcw + * @max_tidaws: maximum number of tidaws to be used for data addressing or zero + * if no tida is to be used. + * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing + * by the interrogate tcw, if specified + * + * Calculate and return the number of bytes required to hold an itcw with the + * given parameters and assuming tccbs with maximum size. + * + * Note that the resulting size also contains bytes needed for alignment + * padding as well as padding to ensure that data structures don't cross a + * 4k-boundary where required. 
+ */ +size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) +{ + size_t len; + + /* Main data. */ + len = sizeof(struct itcw); + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ max_tidaws * sizeof(struct tidaw); + /* Interrogate data. */ + if (intrg) { + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); + } + /* Maximum required alignment padding. */ + len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; + /* Maximum padding for structures that may not cross 4k boundary. */ + if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) + len += max(max_tidaws, intrg_max_tidaws) * + sizeof(struct tidaw) - 1; + return len; +} +EXPORT_SYMBOL(itcw_calc_size); + +#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) + +static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, + int align, int check_4k) +{ + addr_t addr; + + addr = ALIGN(*start, align); + if (check_4k && CROSS4K(addr, len)) { + addr = ALIGN(addr, 4096); + addr = ALIGN(addr, align); + } + if (addr + len > end) + return ERR_PTR(-ENOSPC); + *start = addr + len; + return (void *) addr; +} + +/** + * itcw_init - initialize incremental tcw data structure + * @buffer: address of buffer to use for data structures + * @size: number of bytes in buffer + * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write + * operation tcw + * @intrg: if non-zero, add and initialize an interrogate tcw + * @max_tidaws: maximum number of tidaws to be used for data addressing or zero + * if no tida is to be used. + * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing + * by the interrogate tcw, if specified + * + * Prepare the specified buffer to be used as an incremental tcw, i.e. a + * helper data structure that can be used to construct a valid tcw by + * successive calls to other helper functions. Note: the buffer needs to be + * located below the 2G address limit. The resulting tcw has the following + * restrictions: + * - no tccb tidal + * - input/output tidal is contiguous (no ttic) + * - total data should not exceed 4k + * - tcw specifies either read or write operation + * + * On success, return pointer to the resulting incremental tcw data structure, + * ERR_PTR otherwise. + */ +struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, + int max_tidaws, int intrg_max_tidaws) +{ + struct itcw *itcw; + void *chunk; + addr_t start; + addr_t end; + + /* Check for 2G limit. */ + start = (addr_t) buffer; + end = start + size; + if (end > (1 << 31)) + return ERR_PTR(-EINVAL); + memset(buffer, 0, size); + /* ITCW. */ + chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); + if (IS_ERR(chunk)) + return chunk; + itcw = chunk; + itcw->max_tidaws = max_tidaws; + itcw->intrg_max_tidaws = intrg_max_tidaws; + /* Main TCW. */ + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->tcw = chunk; + tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, + (op == ITCW_OP_WRITE) ? 1 : 0); + /* Interrogate TCW. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->intrg_tcw = chunk; + tcw_init(itcw->intrg_tcw, 1, 0); + tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); + } + /* Data TIDAL. 
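The CROSS4K test that drives fit_chunk() is also why itcw_calc_size() reserves the extra max(...) * sizeof(struct tidaw) - 1 bytes: a tidaw list that would straddle a 4k frame is moved whole to the next frame. A small stand-alone illustration of the same check (plain user-space C; the addresses are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the CROSS4K(x, l) macro: are x and x + l in different 4k frames?
 * Like the macro, this also fires when the chunk ends exactly on a frame
 * boundary. */
static int crosses_4k(uint64_t addr, uint64_t len)
{
	return (addr & ~4095ULL) != ((addr + len) & ~4095ULL);
}

int main(void)
{
	/* 20 tidaws of 16 bytes each = 320 bytes */
	printf("%d\n", crosses_4k(0x1000, 320));	/* 0: stays in one frame */
	printf("%d\n", crosses_4k(0x1f80, 320));	/* 1: straddles 0x2000   */
	return 0;
}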
*/ + if (max_tidaws > 0) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + max_tidaws, 16, 1); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->tcw, chunk, 1); + } + /* Interrogate data TIDAL. */ + if (intrg && (intrg_max_tidaws > 0)) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + intrg_max_tidaws, 16, 1); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->intrg_tcw, chunk, 1); + } + /* TSB. */ + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->tcw, chunk); + /* Interrogate TSB. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->intrg_tcw, chunk); + } + /* TCCB. */ + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); + tcw_set_tccb(itcw->tcw, chunk); + /* Interrogate TCCB. */ + if (intrg) { + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); + tcw_set_tccb(itcw->intrg_tcw, chunk); + tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, + sizeof(struct dcw_intrg_data), 0); + tcw_finalize(itcw->intrg_tcw, 0); + } + return itcw; +} +EXPORT_SYMBOL(itcw_init); + +/** + * itcw_add_dcw - add a dcw to the itcw + * @itcw: address of the itcw + * @cmd: the dcw command + * @flags: flags for the dcw + * @cd: address of control data for this dcw or NULL if none is required + * @cd_count: number of control data bytes for this dcw + * @count: number of data bytes for this dcw + * + * Add a new dcw to the specified itcw by writing the dcw information specified + * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return + * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw + * would exceed the available space. + * + * Note: the tcal field of the tccb header will be updated to reflect added + * content. + */ +struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, + u8 cd_count, u32 count) +{ + return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, + flags, cd, cd_count, count); +} +EXPORT_SYMBOL(itcw_add_dcw); + +/** + * itcw_add_tidaw - add a tidaw to the itcw + * @itcw: address of the itcw + * @flags: flags for the new tidaw + * @addr: address value for the new tidaw + * @count: count value for the new tidaw + * + * Add a new tidaw to the input/output data tidaw-list of the specified itcw + * (depending on the value of the r-flag and w-flag). Return a pointer to + * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the + * available space. + * + * Note: the tidaw-list is assumed to be contiguous with no ttics. The + * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. + */ +struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) +{ + if (itcw->num_tidaws >= itcw->max_tidaws) + return ERR_PTR(-ENOSPC); + return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); +} +EXPORT_SYMBOL(itcw_add_tidaw); + +/** + * itcw_set_data - set data address and tida flag of the itcw + * @itcw: address of the itcw + * @addr: the data address + * @use_tidal: zero of the data address specifies a contiguous block of data, + * non-zero if it specifies a list if tidaws. + * + * Set the input/output data address of the itcw (depending on the value of the + * r-flag and w-flag). 
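For the common case of a single contiguous read buffer the whole construction collapses to a handful of calls. A hedged sketch under that assumption (the helper name, command code and error handling are invented, not taken from the patch):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/itcw.h>

/* Hypothetical helper: build a transport-mode read of 'len' bytes into one
 * contiguous buffer, without a tidaw list and without an interrogate tcw. */
static struct itcw *example_build_flat_read(void *data, u32 len)
{
	struct itcw *itcw;
	size_t size;
	void *buf;

	size = itcw_calc_size(0, 0, 0);
	buf = kmalloc(size, GFP_KERNEL | GFP_DMA);	/* must stay below 2G */
	if (!buf)
		return ERR_PTR(-ENOMEM);
	itcw = itcw_init(buf, size, ITCW_OP_READ, 0, 0, 0);
	if (IS_ERR(itcw)) {
		kfree(buf);
		return itcw;
	}
	itcw_set_data(itcw, data, 0);			/* contiguous block, no tida */
	itcw_add_dcw(itcw, 0x02, 0, NULL, 0, len);	/* illustrative command code */
	itcw_finalize(itcw);
	return itcw;
}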
If @use_tidal is non-zero, the corresponding tida flag + * is set as well. + */ +void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) +{ + tcw_set_data(itcw->tcw, addr, use_tidal); +} +EXPORT_SYMBOL(itcw_set_data); + +/** + * itcw_finalize - calculate length and count fields of the itcw + * @itcw: address of the itcw + * + * Calculate tcw input-/output-count and tccbl fields and add a tcat the tccb. + * In case input- or output-tida is used, the tidaw-list must be stored in + * continuous storage (no ttic). The tcal field in the tccb must be + * up-to-date. + */ +void itcw_finalize(struct itcw *itcw) +{ + tcw_finalize(itcw->tcw, itcw->num_tidaws); +} +EXPORT_SYMBOL(itcw_finalize); diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 445cf364e46..2bf36e14b10 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev) default: BUG(); } - ccw_device_set_timeout(cdev, 0); wake_up(&cdev->private->wait_q); } @@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) case -EIO: QDIO_PRINT_ERR("i/o error on device %s\n", cdev->dev.bus_id); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + wake_up(&cdev->private->wait_q); return; case -ETIMEDOUT: qdio_timeout_handler(cdev); @@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) QDIO_DBF_TEXT4(0, trace, dbf_text); #endif /* CONFIG_QDIO_DEBUG */ - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: @@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void) { char dbf_text[15]; - if (!css_characteristics_avail) - return -EIO; - /* Check for bit 41. */ if (!css_general_characteristics.aif) { QDIO_PRINT_WARN("Adapter interruption facility not " \ @@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how) spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); } else if (rc == 0) { qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - ccw_device_set_timeout(cdev, timeout); spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); - wait_event(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR); + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || + irq_ptr->state == QDIO_IRQ_STATE_ERR, + timeout); } else { QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " "device %s\n", result, cdev->dev.bus_id); @@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how) /* Ignore errors. 
*/ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - ccw_device_set_timeout(cdev, 0); out: up(&irq_ptr->setting_up_sema); return result; @@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); - if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { - ccw_device_set_timeout(cdev, 0); + if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) return; - } qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); - ccw_device_set_timeout(cdev, 0); } int @@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data) irq_ptr->schid.ssid, irq_ptr->schid.sch_no, result, result2); result=result2; - if (result) - ccw_device_set_timeout(cdev, 0); } spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); @@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags) spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - ccw_device_set_timeout(cdev, 0); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); @@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void) char dbf_text[20]; tiqdio_ind = - s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); + s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, + TIQDIO_THININT_ISC); if (IS_ERR(tiqdio_ind)) { sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -3738,7 +3730,8 @@ static void tiqdio_unregister_thinints(void) { if (tiqdio_ind) - s390_unregister_adapter_interrupt(tiqdio_ind); + s390_unregister_adapter_interrupt(tiqdio_ind, + TIQDIO_THININT_ISC); } static int @@ -3899,6 +3892,7 @@ init_QDIO(void) qdio_mempool_alloc, qdio_mempool_free, NULL); + isc_register(QDIO_AIRQ_ISC); if (tiqdio_check_chsc_availability()) QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); @@ -3911,6 +3905,7 @@ static void __exit cleanup_QDIO(void) { tiqdio_unregister_thinints(); + isc_unregister(QDIO_AIRQ_ISC); qdio_remove_procfs_entry(); qdio_release_qdio_memory(); qdio_unregister_dbf_views(); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index c3df6b2c38b..7656081a24d 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -2,8 +2,8 @@ #define _CIO_QDIO_H #include <asm/page.h> - -#include "schid.h" +#include <asm/isc.h> +#include <asm/schid.h> #ifdef CONFIG_QDIO_DEBUG #define QDIO_VERBOSE_LEVEL 9 @@ -26,7 +26,7 @@ */ #define IQDIO_FILL_LEVEL_TO_POLL 4 -#define TIQDIO_THININT_ISC 3 +#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC #define TIQDIO_DELAY_TARGET 0 #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h deleted file mode 100644 index 54328fec5ad..00000000000 --- a/drivers/s390/cio/schid.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef S390_SCHID_H -#define S390_SCHID_H - -struct subchannel_id { - __u32 reserved:13; - __u32 ssid:2; - __u32 one:1; - __u32 sch_no:16; -} __attribute__ ((packed,aligned(4))); - - -/* Helper function for sane state of pre-allocated subchannel_id. 
*/ -static inline void -init_subchannel_id(struct subchannel_id *schid) -{ - memset(schid, 0, sizeof(struct subchannel_id)); - schid->one = 1; -} - -static inline int -schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) -{ - return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); -} - -#endif /* S390_SCHID_H */ diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c new file mode 100644 index 00000000000..f8da25ab576 --- /dev/null +++ b/drivers/s390/cio/scsw.c @@ -0,0 +1,843 @@ +/* + * Helper functions for scsw access. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <asm/cio.h> +#include "css.h" +#include "chsc.h" + +/** + * scsw_is_tm - check for transport mode scsw + * @scsw: pointer to scsw + * + * Return non-zero if the specified scsw is a transport mode scsw, zero + * otherwise. + */ +int scsw_is_tm(union scsw *scsw) +{ + return css_general_characteristics.fcx && (scsw->tm.x == 1); +} +EXPORT_SYMBOL(scsw_is_tm); + +/** + * scsw_key - return scsw key field + * @scsw: pointer to scsw + * + * Return the value of the key field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.key; + else + return scsw->cmd.key; +} +EXPORT_SYMBOL(scsw_key); + +/** + * scsw_eswf - return scsw eswf field + * @scsw: pointer to scsw + * + * Return the value of the eswf field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.eswf; + else + return scsw->cmd.eswf; +} +EXPORT_SYMBOL(scsw_eswf); + +/** + * scsw_cc - return scsw cc field + * @scsw: pointer to scsw + * + * Return the value of the cc field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cc; + else + return scsw->cmd.cc; +} +EXPORT_SYMBOL(scsw_cc); + +/** + * scsw_ectl - return scsw ectl field + * @scsw: pointer to scsw + * + * Return the value of the ectl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.ectl; + else + return scsw->cmd.ectl; +} +EXPORT_SYMBOL(scsw_ectl); + +/** + * scsw_pno - return scsw pno field + * @scsw: pointer to scsw + * + * Return the value of the pno field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.pno; + else + return scsw->cmd.pno; +} +EXPORT_SYMBOL(scsw_pno); + +/** + * scsw_fctl - return scsw fctl field + * @scsw: pointer to scsw + * + * Return the value of the fctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.fctl; + else + return scsw->cmd.fctl; +} +EXPORT_SYMBOL(scsw_fctl); + +/** + * scsw_actl - return scsw actl field + * @scsw: pointer to scsw + * + * Return the value of the actl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. 
+ */ +u32 scsw_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.actl; + else + return scsw->cmd.actl; +} +EXPORT_SYMBOL(scsw_actl); + +/** + * scsw_stctl - return scsw stctl field + * @scsw: pointer to scsw + * + * Return the value of the stctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.stctl; + else + return scsw->cmd.stctl; +} +EXPORT_SYMBOL(scsw_stctl); + +/** + * scsw_dstat - return scsw dstat field + * @scsw: pointer to scsw + * + * Return the value of the dstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.dstat; + else + return scsw->cmd.dstat; +} +EXPORT_SYMBOL(scsw_dstat); + +/** + * scsw_cstat - return scsw cstat field + * @scsw: pointer to scsw + * + * Return the value of the cstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cstat; + else + return scsw->cmd.cstat; +} +EXPORT_SYMBOL(scsw_cstat); + +/** + * scsw_cmd_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_key(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_key); + +/** + * scsw_cmd_is_valid_sctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_sctl(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_sctl); + +/** + * scsw_cmd_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_eswf(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_eswf); + +/** + * scsw_cmd_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_cc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_cc); + +/** + * scsw_cmd_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_fmt(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_fmt); + +/** + * scsw_cmd_is_valid_pfch - check pfch field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pfch field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_pfch(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_pfch); + +/** + * scsw_cmd_is_valid_isic - check isic field validity + * @scsw: pointer to scsw + * + * Return non-zero if the isic field of the specified command mode scsw is + * valid, zero otherwise. 
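The accessors above let interrupt handlers stay mode-agnostic: instead of reading irb->scsw.cmd.* or irb->scsw.tm.* directly, status evaluation can go through the wrappers. A sketch of such a check, assuming the union scsw layout introduced by this series (the function name is invented):

#include <asm/cio.h>

/* Illustrative only: decide whether an irb reports clean completion,
 * independent of command vs. transport mode. */
static int example_irb_clean(struct irb *irb)
{
	union scsw *scsw = &irb->scsw;

	if (!(scsw_stctl(scsw) & SCSW_STCTL_STATUS_PEND))
		return 0;			/* no status to look at yet */
	if (scsw_cstat(scsw) != 0)
		return 0;			/* subchannel status problem */
	return scsw_dstat(scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END);
}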
+ */ +int scsw_cmd_is_valid_isic(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_isic); + +/** + * scsw_cmd_is_valid_alcc - check alcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the alcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_alcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_alcc); + +/** + * scsw_cmd_is_valid_ssi - check ssi field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ssi field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_ssi(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_ssi); + +/** + * scsw_cmd_is_valid_zcc - check zcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the zcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_zcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_zcc); + +/** + * scsw_cmd_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_ectl(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_ectl); + +/** + * scsw_cmd_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_pno(union scsw *scsw) +{ + return (scsw->cmd.fctl != 0) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_pno); + +/** + * scsw_cmd_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_fctl); + +/** + * scsw_cmd_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_actl); + +/** + * scsw_cmd_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_stctl); + +/** + * scsw_cmd_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified command mode scsw is + * valid, zero otherwise. 
+ */ +int scsw_cmd_is_valid_dstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_dstat); + +/** + * scsw_cmd_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_cstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_cstat); + +/** + * scsw_tm_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_key(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_tm_is_valid_key); + +/** + * scsw_tm_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_eswf(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_tm_is_valid_eswf); + +/** + * scsw_tm_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_cc(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_tm_is_valid_cc); + +/** + * scsw_tm_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fmt(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fmt); + +/** + * scsw_tm_is_valid_x - check x field validity + * @scsw: pointer to scsw + * + * Return non-zero if the x field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_x(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_x); + +/** + * scsw_tm_is_valid_q - check q field validity + * @scsw: pointer to scsw + * + * Return non-zero if the q field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_q(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_q); + +/** + * scsw_tm_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_ectl(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); +} +EXPORT_SYMBOL(scsw_tm_is_valid_ectl); + +/** + * scsw_tm_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified transport mode scsw is + * valid, zero otherwise. 
+ */ +int scsw_tm_is_valid_pno(union scsw *scsw) +{ + return (scsw->tm.fctl != 0) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); +} +EXPORT_SYMBOL(scsw_tm_is_valid_pno); + +/** + * scsw_tm_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fctl); + +/** + * scsw_tm_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_actl); + +/** + * scsw_tm_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_stctl); + +/** + * scsw_tm_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_dstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} +EXPORT_SYMBOL(scsw_tm_is_valid_dstat); + +/** + * scsw_tm_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_cstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} +EXPORT_SYMBOL(scsw_tm_is_valid_cstat); + +/** + * scsw_tm_is_valid_fcxs - check fcxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fcxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fcxs(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fcxs); + +/** + * scsw_tm_is_valid_schxs - check schxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the schxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_schxs(union scsw *scsw) +{ + return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | + SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_PROT_CHECK | + SCHN_STAT_CHN_DATA_CHK)); +} +EXPORT_SYMBOL(scsw_tm_is_valid_schxs); + +/** + * scsw_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_actl(scsw); + else + return scsw_cmd_is_valid_actl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_actl); + +/** + * scsw_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. 
+ * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cc(scsw); + else + return scsw_cmd_is_valid_cc(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_cc); + +/** + * scsw_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cstat(scsw); + else + return scsw_cmd_is_valid_cstat(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_cstat); + +/** + * scsw_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_dstat(scsw); + else + return scsw_cmd_is_valid_dstat(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_dstat); + +/** + * scsw_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_ectl(scsw); + else + return scsw_cmd_is_valid_ectl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_ectl); + +/** + * scsw_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_eswf(scsw); + else + return scsw_cmd_is_valid_eswf(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_eswf); + +/** + * scsw_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_fctl(scsw); + else + return scsw_cmd_is_valid_fctl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_fctl); + +/** + * scsw_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_key(scsw); + else + return scsw_cmd_is_valid_key(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_key); + +/** + * scsw_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. 
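The scsw_is_valid_* wrappers are meant to be consulted before the matching accessor is trusted. A short sketch of that intended pattern (the handler name is invented):

#include <linux/kernel.h>
#include <asm/cio.h>

static void example_check_unit_check(struct irb *irb)
{
	/* Only evaluate dstat if the scsw says it is valid for this interrupt. */
	if (scsw_is_valid_dstat(&irb->scsw) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK))
		pr_debug("unit check, sense byte 0: %02x\n", irb->ecw[0]);
}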
+ */ +int scsw_is_valid_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_pno(scsw); + else + return scsw_cmd_is_valid_pno(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_pno); + +/** + * scsw_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_stctl(scsw); + else + return scsw_cmd_is_valid_stctl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_stctl); + +/** + * scsw_cmd_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the command mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +int scsw_cmd_is_solicited(union scsw *scsw) +{ + return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} +EXPORT_SYMBOL(scsw_cmd_is_solicited); + +/** + * scsw_tm_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +int scsw_tm_is_solicited(union scsw *scsw) +{ + return (scsw->tm.cc != 0) || (scsw->tm.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} +EXPORT_SYMBOL(scsw_tm_is_solicited); + +/** + * scsw_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport or command mode scsw indicates that the + * associated status condition is solicited, zero if it is unsolicited. + */ +int scsw_is_solicited(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_solicited(scsw); + else + return scsw_cmd_is_solicited(scsw); +} +EXPORT_SYMBOL(scsw_is_solicited); diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1ab3e3efd1..62b6b55230d 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -34,13 +34,15 @@ #include <linux/mutex.h> #include <asm/s390_rdev.h> #include <asm/reset.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> #include "ap_bus.h" /* Some prototypes. */ static void ap_scan_bus(struct work_struct *); static void ap_poll_all(unsigned long); -static void ap_poll_timeout(unsigned long); +static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); static int ap_poll_thread_start(void); static void ap_poll_thread_stop(void); static void ap_request_timeout(unsigned long); @@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus); /* * Tasklet & timer for AP request polling. */ -static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); static atomic_t ap_poll_requests = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread = NULL; static DEFINE_MUTEX(ap_poll_thread_mutex); +static struct hrtimer ap_poll_timer; +/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. + * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ +static unsigned long long poll_timeout = 250000; /** * ap_intructions_available() - Test if AP instructions are available. 
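The conversion of the AP polling timer from a jiffies-based timer_list to an hrtimer follows the usual pattern: hrtimer_init(), a callback that returns HRTIMER_NORESTART, and re-arming with a ktime_t interval whenever polling is needed. A condensed stand-alone sketch of that pattern (names are invented; unlike the patch, which arms its timer with HRTIMER_MODE_ABS, this sketch uses a relative expiry for simplicity):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_poll_timer;
static unsigned long long example_poll_ns = 250000;	/* ~4 kHz, as in LPAR */

static enum hrtimer_restart example_poll_fn(struct hrtimer *unused)
{
	/* kick the bottom half here, e.g. tasklet_schedule(&...); */
	return HRTIMER_NORESTART;	/* re-armed explicitly when needed */
}

static void example_poll_arm(void)
{
	if (hrtimer_is_queued(&example_poll_timer))
		return;
	hrtimer_start(&example_poll_timer, ktime_set(0, example_poll_ns),
		      HRTIMER_MODE_REL);
}

static void example_poll_setup(void)
{
	hrtimer_init(&example_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_poll_timer.function = example_poll_fn;
	example_poll_arm();
}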
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus, static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); +static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); +} + +static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long long time; + ktime_t hr_time; + + /* 120 seconds = maximum poll interval */ + if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) + return -EINVAL; + poll_timeout = time; + hr_time = ktime_set(0, poll_timeout); + + if (!hrtimer_is_queued(&ap_poll_timer) || + !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { + ap_poll_timer.expires = hr_time; + hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); + } + return count; +} + +static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); + static struct bus_attribute *const ap_bus_attrs[] = { &bus_attr_ap_domain, &bus_attr_config_time, &bus_attr_poll_thread, - NULL + &bus_attr_poll_timeout, + NULL, }; /** @@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr) */ static inline void ap_schedule_poll_timer(void) { - if (timer_pending(&ap_poll_timer)) + if (hrtimer_is_queued(&ap_poll_timer)) return; - mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); + hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), + HRTIMER_MODE_ABS); } /** @@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message); /** * ap_poll_timeout(): AP receive polling for finished AP requests. - * @unused: Unused variable. + * @unused: Unused pointer. * - * Schedules the AP tasklet. + * Schedules the AP tasklet using a high resolution timer. */ -static void ap_poll_timeout(unsigned long unused) +static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) { tasklet_schedule(&ap_tasklet); + return HRTIMER_NORESTART; } /** @@ -1344,6 +1379,14 @@ int __init ap_module_init(void) ap_config_timer.expires = jiffies + ap_config_time * HZ; add_timer(&ap_config_timer); + /* Setup the high resultion poll timer. + * If we are running under z/VM adjust polling to z/VM polling rate. + */ + if (MACHINE_IS_VM) + poll_timeout = 1500000; + hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ap_poll_timer.function = ap_poll_timeout; + /* Start the low priority AP bus poll thread. 
*/ if (ap_thread_flag) { rc = ap_poll_thread_start(); @@ -1355,7 +1398,7 @@ int __init ap_module_init(void) out_work: del_timer_sync(&ap_config_timer); - del_timer_sync(&ap_poll_timer); + hrtimer_cancel(&ap_poll_timer); destroy_workqueue(ap_work_queue); out_root: s390_root_dev_unregister(ap_root_device); @@ -1386,7 +1429,7 @@ void ap_module_exit(void) ap_reset_domain(); ap_poll_thread_stop(); del_timer_sync(&ap_config_timer); - del_timer_sync(&ap_poll_timer); + hrtimer_cancel(&ap_poll_timer); destroy_workqueue(ap_work_queue); tasklet_kill(&ap_tasklet); s390_root_dev_unregister(ap_root_device); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index c1e1200c43f..446378b308f 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -92,6 +92,8 @@ struct ap_queue_status { #define AP_DEVICE_TYPE_PCIXCC 5 #define AP_DEVICE_TYPE_CEX2A 6 #define AP_DEVICE_TYPE_CEX2C 7 +#define AP_DEVICE_TYPE_CEX2A2 8 +#define AP_DEVICE_TYPE_CEX2C2 9 /* * AP reset flag states diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4d36e805a23..8a4964f3584 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -1068,10 +1068,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, #define LBUFSIZE 1200UL lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); - if (!lbuf) { - PRINTK("kmalloc failed!\n"); + if (!lbuf) return 0; - } local_count = min(LBUFSIZE - 1, count); if (copy_from_user(lbuf, buffer, local_count) != 0) { @@ -1081,23 +1079,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, lbuf[local_count] = '\0'; ptr = strstr(lbuf, "Online devices"); - if (!ptr) { - PRINTK("Unable to parse data (missing \"Online devices\")\n"); + if (!ptr) goto out; - } ptr = strstr(ptr, "\n"); - if (!ptr) { - PRINTK("Unable to parse data (missing newline " - "after \"Online devices\")\n"); + if (!ptr) goto out; - } ptr++; - if (strstr(ptr, "Waiting work element counts") == NULL) { - PRINTK("Unable to parse data (missing " - "\"Waiting work element counts\")\n"); + if (strstr(ptr, "Waiting work element counts") == NULL) goto out; - } for (j = 0; j < 64 && *ptr; ptr++) { /* @@ -1197,16 +1187,12 @@ int __init zcrypt_api_init(void) /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); - if (rc < 0) { - PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", - zcrypt_misc_device.minor, rc); + if (rc < 0) goto out; - } /* Set up the proc file system */ zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); if (!zcrypt_entry) { - PRINTK("Couldn't create z90crypt proc entry\n"); rc = -ENOMEM; goto out_misc; } diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 5c6e222b2ac..1d1ec74dadb 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -30,34 +30,6 @@ #ifndef _ZCRYPT_API_H_ #define _ZCRYPT_API_H_ -/** - * Macro definitions - * - * PDEBUG debugs in the form "zcrypt: function_name -> message" - * - * PRINTK is like PDEBUG, except that it is always enabled - * PRINTKN is like PRINTK, except that it does not include the function name - * PRINTKW is like PRINTK, except that it uses KERN_WARNING - * PRINTKC is like PRINTK, except that it uses KERN_CRIT - */ -#define DEV_NAME "zcrypt" - -#define PRINTK(fmt, args...) \ - printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) -#define PRINTKN(fmt, args...) 
\ - printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) -#define PRINTKW(fmt, args...) \ - printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args) -#define PRINTKC(fmt, args...) \ - printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args) - -#ifdef ZCRYPT_DEBUG -#define PDEBUG(fmt, args...) \ - printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) -#else -#define PDEBUG(fmt, args...) do {} while (0) -#endif - #include "ap_bus.h" #include <asm/zcrypt.h> diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 08657f604b8..54f4cbc3be9 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -49,6 +49,7 @@ static struct ap_device_id zcrypt_cex2a_ids[] = { { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, + { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) }, { /* end of list */ }, }; @@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev, return convert_type80(zdev, reply, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 3e27fe77d20..03ba27f05f9 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev, { struct error_hdr *ehdr = reply->message; - PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n", - ehdr->type, *(unsigned int *) reply->message, - *(unsigned int *) (reply->message + 4)); - switch (ehdr->reply_code) { case REP82_ERROR_OPERAND_INVALID: case REP82_ERROR_OPERAND_SIZE: @@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev, zdev->online = 0; return -EAGAIN; default: - PRINTKW("unknown type %02x reply code = %d\n", - ehdr->type, ehdr->reply_code); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 6e93b475178..12da4815ba8 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev, return convert_type84(zdev, reply, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. 
*/ } diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 17ea56ce1c1..779952cb19f 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev, service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); if (unlikely(service_rc != 0)) { service_rs = le16_to_cpu(msg->cprb.ccp_rscode); - if (service_rc == 8 && service_rs == 66) { - PDEBUG("Bad block format on PCICC\n"); + if (service_rc == 8 && service_rs == 66) return -EINVAL; - } - if (service_rc == 8 && service_rs == 65) { - PDEBUG("Probably an even modulus on PCICC\n"); + if (service_rc == 8 && service_rs == 65) return -EINVAL; - } if (service_rc == 8 && service_rs == 770) { - PDEBUG("Invalid key length on PCICC\n"); zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; return -EAGAIN; } if (service_rc == 8 && service_rs == 783) { - PDEBUG("Extended bitlengths not enabled on PCICC\n"); zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; return -EAGAIN; } - PRINTK("Unknown service rc/rs (PCICC): %d/%d\n", - service_rc, service_rs); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } @@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev, outputdata, outputdatalength); /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 0bc9b3188e6..d8ad36f8154 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -72,6 +72,7 @@ struct response_type { static struct ap_device_id zcrypt_pcixcc_ids[] = { { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, + { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) }, { /* end of list */ }, }; @@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, ap_msg->length = sizeof(struct type6_hdr) + CEIL4(xcRB->request_control_blk_length) + xcRB->request_data_length; - if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { - PRINTK("Combined message is too large (%ld/%d/%d).\n", - sizeof(struct type6_hdr), - xcRB->request_control_blk_length, - xcRB->request_data_length); + if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) return -EFAULT; - } - if (CEIL4(xcRB->reply_control_blk_length) > - PCIXCC_MAX_XCRB_REPLY_SIZE) { - PDEBUG("Reply CPRB length is too large (%d).\n", - xcRB->request_control_blk_length); + if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE) return -EFAULT; - } - if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) { - PDEBUG("Reply data block length is too large (%d).\n", - xcRB->reply_data_length); + if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) return -EFAULT; - } replylen = CEIL4(xcRB->reply_control_blk_length) + CEIL4(xcRB->reply_data_length) + sizeof(struct type86_fmt2_msg); if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { - PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE" - " (%d/%d/%d).\n", - sizeof(struct type86_fmt2_msg), - xcRB->reply_control_blk_length, - xcRB->reply_data_length); xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - (sizeof(struct type86_fmt2_msg) + CEIL4(xcRB->reply_data_length)); - 
PDEBUG("Capping Reply CPRB length at %d\n", - xcRB->reply_control_blk_length); } /* prepare type6 header */ @@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, xcRB->request_control_blk_length)) return -EFAULT; if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > - xcRB->request_control_blk_length) { - PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len, - xcRB->request_control_blk_length); + xcRB->request_control_blk_length) return -EFAULT; - } function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); @@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev, service_rc = msg->cprbx.ccp_rtcode; if (unlikely(service_rc != 0)) { service_rs = msg->cprbx.ccp_rscode; - if (service_rc == 8 && service_rs == 66) { - PDEBUG("Bad block format on PCIXCC/CEX2C\n"); + if (service_rc == 8 && service_rs == 66) return -EINVAL; - } - if (service_rc == 8 && service_rs == 65) { - PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n"); + if (service_rc == 8 && service_rs == 65) return -EINVAL; - } - if (service_rc == 8 && service_rs == 770) { - PDEBUG("Invalid key length on PCIXCC/CEX2C\n"); + if (service_rc == 8 && service_rs == 770) return -EINVAL; - } if (service_rc == 8 && service_rs == 783) { - PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n"); zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; return -EAGAIN; } - if (service_rc == 12 && service_rs == 769) { - PDEBUG("Invalid key on PCIXCC/CEX2C\n"); + if (service_rc == 12 && service_rs == 769) return -EINVAL; - } - PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n", - service_rc, service_rs); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } @@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev, } __attribute__((packed)) *msg = reply->message; char *data = reply->message; - if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) { - PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n", - rc, rs); + if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) return -EINVAL; - } memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); return msg->fmt2.count2; } @@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev, outputdata, outputdatalength); /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } @@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev, return convert_type86_xcrb(zdev, reply, xcRB); /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. 
*/ @@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev, return convert_type86_rng(zdev, reply, data); /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ - PRINTK("Unrecognized Message Header: %08x%08x\n", - *(unsigned int *) reply->message, - *(unsigned int *) (reply->message+4)); zdev->online = 0; return -EAGAIN; /* repeat the request on a different device. */ } @@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, memcpy(msg->message, reply->message, length); break; default: - PRINTK("Invalid internal response type: %i\n", - resp_type->type); - memcpy(msg->message, &error_reply, - sizeof error_reply); + memcpy(msg->message, &error_reply, sizeof error_reply); } } else memcpy(msg->message, reply->message, sizeof error_reply); diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 04a1d7bf678..c644669a75c 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev, if (!cdev->dev.driver_data) { printk(KERN_WARNING "claw: unsolicited interrupt for device:" "%s received c-%02x d-%02x\n", - cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); + cdev->dev.bus_id, irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); #ifdef FUNCTRACE printk(KERN_INFO "claw: %s() " "exit on line %d\n",__func__,__LINE__); @@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev, #ifdef IOTRACE printk(KERN_INFO "%s: interrupt for device: %04x " "received c-%02x d-%02x state-%02x\n", - dev->name, p_ch->devno, irb->scsw.cstat, - irb->scsw.dstat, p_ch->claw_state); + dev->name, p_ch->devno, irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat, p_ch->claw_state); #endif /* Copy interruption response block. 
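A three-way stctl test recurs throughout the claw and ctcm interrupt handlers that follow: secondary status, status-pending alone, or alert plus status-pending, i.e. "is this interrupt carrying final status?". As a sketch, the open-coded condition could live in a small helper (hypothetical, not part of the patch):

#include <asm/cio.h>

/* Hypothetical helper mirroring the condition open-coded in claw/ctcm. */
static int example_final_status(struct irb *irb)
{
	u32 stctl = irb->scsw.cmd.stctl;

	return (stctl & SCSW_STCTL_SEC_STATUS) ||
	       (stctl == SCSW_STCTL_STATUS_PEND) ||
	       (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND));
}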
*/ memcpy(p_ch->irb, irb, sizeof(struct irb)); /* Check for good subchannel return code, otherwise error message */ - if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { + if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { printk(KERN_INFO "%s: subchannel check for device: %04x -" " Sch Stat %02x Dev Stat %02x CPA - %04x\n", dev->name, p_ch->devno, - irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); + irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, + irb->scsw.cmd.cpa); #ifdef IOTRACE dumpit((char *)irb,sizeof(struct irb)); - dumpit((char *)(unsigned long)irb->scsw.cpa, + dumpit((char *)(unsigned long)irb->scsw.cmd.cpa, sizeof(struct ccw1)); #endif #ifdef FUNCTRACE @@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev, } /* Check the reason-code of a unit check */ - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) ccw_check_unit_check(p_ch, irb->ecw[0]); - } /* State machine to bring the connection up, down and to restart */ - p_ch->last_dstat = irb->scsw.dstat; + p_ch->last_dstat = irb->scsw.cmd.dstat; switch (p_ch->claw_state) { case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ #ifdef DEBUGMSG printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); #endif - if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { + if (!((p_ch->irb->scsw.cmd.stctl & + SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.cmd.stctl == + SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | + SCSW_STCTL_STATUS_PEND)))) { #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit on line %d\n", dev->name,__func__,__LINE__); @@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev, printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", dev->name); #endif - if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { + if (!((p_ch->irb->scsw.cmd.stctl & + SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.cmd.stctl == + SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | + SCSW_STCTL_STATUS_PEND)))) { #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit on line %d\n", dev->name,__func__,__LINE__); @@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev, "interrupt for device:" "%s received c-%02x d-%02x\n", cdev->dev.bus_id, - irb->scsw.cstat, - irb->scsw.dstat); + irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); return; } #ifdef DEBUGMSG @@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev, return; case CLAW_START_READ: CLAW_DBF_TEXT(4,trace,"ReadIRQ"); - if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { clear_bit(0, (void *)&p_ch->IO_active); if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || (p_ch->irb->ecw[0] & 0x40) == 0x40 || @@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4,trace,"notrdy"); return; } - if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && - (p_ch->irb->scsw.dstat==0)) { + if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && + (p_ch->irb->scsw.cmd.dstat == 0)) { if (test_and_set_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a) == 0) { tasklet_schedule(&p_ch->tasklet); @@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4,trace,"PCI_read"); return; } - if(!((p_ch->irb->scsw.stctl & 
SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { + if (!((p_ch->irb->scsw.cmd.stctl & + SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.cmd.stctl == + SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | + SCSW_STCTL_STATUS_PEND)))) { #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit on line %d\n", dev->name,__func__,__LINE__); @@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4,trace,"RdIRQXit"); return; case CLAW_START_WRITE: - if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { printk(KERN_INFO "%s: Unit Check Occured in " "write channel\n",dev->name); clear_bit(0, (void *)&p_ch->IO_active); @@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4,trace,"rstrtwrt"); return; } - if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { + if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { clear_bit(0, (void *)&p_ch->IO_active); printk(KERN_INFO "%s: Unit Exception " "Occured in write channel\n", dev->name); } - if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { + if (!((p_ch->irb->scsw.cmd.stctl & + SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.cmd.stctl == + SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | + SCSW_STCTL_STATUS_PEND)))) { #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit on line %d\n", dev->name,__func__,__LINE__); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 2a106f3a076..7e6bd387f4d 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) if (duration > ch->prof.tx_time) ch->prof.tx_time = duration; - if (ch->irb->scsw.count != 0) + if (ch->irb->scsw.cmd.count != 0) ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", - dev->name, ch->irb->scsw.count); + dev->name, ch->irb->scsw.cmd.count); fsm_deltimer(&ch->timer); while ((skb = skb_dequeue(&ch->io_queue))) { priv->stats.tx_packets++; @@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) struct channel *ch = arg; struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->priv; - int len = ch->max_bufsize - ch->irb->scsw.count; + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; struct sk_buff *skb = ch->trans_skb; __u16 block_len = *((__u16 *)skb->data); int check_len; @@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) if (duration > ch->prof.tx_time) ch->prof.tx_time = duration; - if (ch->irb->scsw.count != 0) + if (ch->irb->scsw.cmd.count != 0) ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", - dev->name, ch->irb->scsw.count); + dev->name, ch->irb->scsw.cmd.count); fsm_deltimer(&ch->timer); while ((skb = skb_dequeue(&ch->io_queue))) { priv->stats.tx_packets++; @@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) struct sk_buff *skb = ch->trans_skb; struct sk_buff *new_skb; unsigned long saveflags = 0; /* avoids compiler warning */ - int len = ch->max_bufsize - ch->irb->scsw.count; + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; if (do_debug_data) { CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", diff --git 
a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index d52843da4f5..6b13c1c1beb 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev, /* Check for unsolicited interrupts. */ if (cgdev == NULL) { ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", - cdev->dev.bus_id, irb->scsw.cstat, - irb->scsw.dstat); + cdev->dev.bus_id, irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); return; } @@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev, "received c-%02x d-%02x\n", dev->name, ch->id, - irb->scsw.cstat, - irb->scsw.dstat); + irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); /* Copy interruption response block. */ memcpy(ch->irb, irb, sizeof(struct irb)); /* Check for good subchannel return code, otherwise error message */ - if (irb->scsw.cstat) { + if (irb->scsw.cmd.cstat) { fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", - dev->name, ch->id, irb->scsw.cstat, - irb->scsw.dstat); + dev->name, ch->id, irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); return; } /* Check the reason-code of a unit check */ - if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { ccw_unit_check(ch, irb->ecw[0]); return; } - if (irb->scsw.dstat & DEV_STAT_BUSY) { - if (irb->scsw.dstat & DEV_STAT_ATTENTION) + if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); else fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); return; } - if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); return; } - if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || - (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || - (irb->scsw.stctl == + if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || + (irb->scsw.cmd.stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); else diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 8e7697305a4..f4a32375c03 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c @@ -36,7 +36,6 @@ const char *cu3088_type[] = { "CTC/A", "ESCON channel", "FICON channel", - "P390 LCS card", "OSA LCS card", "CLAW channel device", "unknown channel type", @@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = { { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, - { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 }, { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, { /* end of list */ } diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h index 1753661f702..d8558a7105a 100644 --- a/drivers/s390/net/cu3088.h +++ b/drivers/s390/net/cu3088.h @@ -17,9 +17,6 @@ enum channel_types { /* Device is a FICON channel */ channel_type_ficon, - /* Device is a P390 LCS card */ - channel_type_p390, - /* Device is a OSA2 card */ channel_type_osa2, diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index dd22f4b3703..6de28385b35 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) char 
*sense; sense = (char *) irb->ecw; - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | @@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) else channel = &card->write; - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); - LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); - LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, + irb->scsw.cmd.actl); /* Check for channel and device errors presented */ rc = lcs_get_problem(cdev, irb); @@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* How far in the ccw chain have we processed? */ if ((channel->state != LCS_CH_STATE_INIT) && - (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { - index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) { + index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) - channel->ccws; - if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || - (irb->scsw.cstat & SCHN_STAT_PCI)) + if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || + (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) /* Bloody io subsystem tells us lies about cpa... */ index = (index - 1) & (LCS_NUM_BUFFS - 1); while (channel->io_idx != index) { @@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } } - if ((irb->scsw.dstat & DEV_STAT_DEV_END) || - (irb->scsw.dstat & DEV_STAT_CHN_END) || - (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) + if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || + (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) /* Mark channel as stopped. */ channel->state = LCS_CH_STATE_STOPPED; - else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) + else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) /* CCW execution stopped on a suspend bit. */ channel->state = LCS_CH_STATE_SUSPENDED; - if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { - if (irb->scsw.cc != 0) { + if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { + if (irb->scsw.cmd.cc != 0) { ccw_device_halt(channel->ccwdev, (addr_t) channel); return; } /* The channel has been stopped by halt_IO. */ channel->state = LCS_CH_STATE_HALTED; } - if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) channel->state = LCS_CH_STATE_CLEARED; - } /* Do the rest in the tasklet. 
*/ tasklet_schedule(&channel->irq_tasklet); } @@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) netif_carrier_off(card->dev); break; default: - PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); + LCS_DBF_TEXT(5, trace, "noLGWcmd"); break; } } else @@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) LCS_DBF_TEXT(2, setup, "add_dev"); card = lcs_alloc_card(); if (!card) { - PRINT_ERR("Allocation of lcs card failed\n"); + LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); put_device(&ccwgdev->dev); return -ENOMEM; } ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); if (ret) { - PRINT_ERR("Creating attributes failed"); lcs_free_card(card); put_device(&ccwgdev->dev); return ret; @@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) default: LCS_DBF_TEXT(3, setup, "errinit"); PRINT_ERR("LCS: Initialization failed\n"); - PRINT_ERR("LCS: No device found!\n"); goto out; } if (!dev) @@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) if (!card) return; - PRINT_INFO("Removing lcs group device ....\n"); LCS_DBF_TEXT(3, setup, "remdev"); LCS_DBF_HEX(3, setup, &card, sizeof(void*)); if (ccwgdev->state == CCWGROUP_ONLINE) { diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index e4ba6a0372a..9242b5acc66 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, offset += header->next; header->next -= NETIUCV_HDRLEN; if (skb_tailroom(pskb) < header->next) { - PRINT_WARN("%s: Illegal next field in iucv header: " - "%d > %d\n", - dev->name, header->next, skb_tailroom(pskb)); IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", header->next, skb_tailroom(pskb)); return; @@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, skb_reset_mac_header(pskb); skb = dev_alloc_skb(pskb->len); if (!skb) { - PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", - dev->name); IUCV_DBF_TEXT(data, 2, "Out of memory in netiucv_unpack_skb\n"); privptr->stats.rx_dropped++; @@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) if (!conn->netdev) { iucv_message_reject(conn->path, msg); - PRINT_WARN("Received data for unlinked connection\n"); IUCV_DBF_TEXT(data, 2, "Received data for unlinked connection\n"); return; @@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) if (msg->length > conn->max_buffsize) { iucv_message_reject(conn->path, msg); privptr->stats.rx_dropped++; - PRINT_WARN("msglen %d > max_buffsize %d\n", - msg->length, conn->max_buffsize); IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", msg->length, conn->max_buffsize); return; @@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) msg->length, NULL); if (rc || msg->length < 5) { privptr->stats.rx_errors++; - PRINT_WARN("iucv_receive returned %08x\n", rc); IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); return; } @@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CONN_STATE_IDLE); if (privptr) privptr->stats.tx_errors += txpackets; - PRINT_WARN("iucv_send returned %08x\n", rc); IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); } else { if (privptr) { @@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) path->flags = 0; rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); if (rc) { - PRINT_WARN("%s: IUCV accept 
failed with error %d\n", - netdev->name, rc); IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); return; } @@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); - PRINT_DEBUG("%s('%s'): connecting ...\n", + IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", conn->netdev->name, conn->userid); /* @@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg) struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; - PRINT_WARN("%s: Cannot connect without username\n", netdev->name); - IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); + IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", + netdev->name, conn->userid); } static const fsm_node conn_fsm[] = { @@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) "connection is up and running\n"); break; case DEV_STATE_STOPWAIT: - PRINT_INFO( - "%s: got connection UP event during shutdown!\n", - dev->name); IUCV_DBF_TEXT(data, 2, "dev_action_connup: in DEV_STATE_STOPWAIT\n"); break; @@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); if (!nskb) { - PRINT_WARN("%s: Could not allocate tx_skb\n", - conn->netdev->name); IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); rc = -ENOMEM; return rc; @@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, skb_pull(skb, NETIUCV_HDRLEN); skb_trim(skb, skb->len - NETIUCV_HDRLEN); } - PRINT_WARN("iucv_send returned %08x\n", rc); IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); } else { if (copied) @@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) * Some sanity checks ... */ if (skb == NULL) { - PRINT_WARN("%s: NULL sk_buff passed\n", dev->name); IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); privptr->stats.tx_dropped++; return 0; } if (skb_headroom(skb) < NETIUCV_HDRLEN) { - PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n", - dev->name, NETIUCV_HDRLEN); IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); dev_kfree_skb(skb); @@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, IUCV_DBF_TEXT(trace, 3, __func__); if (count > 9) { - PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); IUCV_DBF_TEXT_(setup, 2, "%d is length of username\n", (int) count); return -EINVAL; @@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, /* trailing lf, grr */ break; } - PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); IUCV_DBF_TEXT_(setup, 2, "username: invalid character %c\n", *p); return -EINVAL; @@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, if (memcmp(username, priv->conn->userid, 9) && (ndev->flags & (IFF_UP | IFF_RUNNING))) { /* username changed while the interface is active. 
*/ - PRINT_WARN("netiucv: device %s active, connected to %s\n", - dev->bus_id, priv->conn->userid); - PRINT_WARN("netiucv: user cannot be updated\n"); IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); - return -EBUSY; + return -EPERM; } read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { read_unlock_bh(&iucv_connection_rwlock); - PRINT_WARN("netiucv: Connection to %s already " - "exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " + "to %s already exists\n", username); return -EEXIST; } } @@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, bs1 = simple_strtoul(buf, &e, 0); if (e && (!isspace(*e))) { - PRINT_WARN("netiucv: Invalid character in buffer!\n"); IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); return -EINVAL; } if (bs1 > NETIUCV_BUFSIZE_MAX) { - PRINT_WARN("netiucv: Given buffer size %d too large.\n", - bs1); IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too large\n", bs1); @@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, } if ((ndev->flags & IFF_RUNNING) && (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { - PRINT_WARN("netiucv: Given buffer size %d too small.\n", - bs1); IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too small\n", bs1); return -EINVAL; } if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { - PRINT_WARN("netiucv: Given buffer size %d too small.\n", - bs1); IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too small\n", bs1); @@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv, IUCV_DBF_TEXT(trace, 3, __func__); if (count>9) { - PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); return -EINVAL; } @@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv, if (*p == '\n') /* trailing lf, grr */ break; - PRINT_WARN("netiucv: Invalid character in username!\n"); IUCV_DBF_TEXT_(setup, 2, "conn_write: invalid character %c\n", *p); return -EINVAL; @@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv, list_for_each_entry(cp, &iucv_connection_list, list) { if (!strncmp(username, cp->userid, 9)) { read_unlock_bh(&iucv_connection_rwlock); - PRINT_WARN("netiucv: Connection to %s already " - "exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " + "to %s already exists\n", username); return -EEXIST; } } @@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv, dev = netiucv_init_netdevice(username); if (!dev) { - PRINT_WARN("netiucv: Could not allocate network device " - "structure for user '%s'\n", - netiucv_printname(username)); IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; } @@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv, if (rc) goto out_unreg; - PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); return count; out_unreg: netiucv_unregister_device(priv->dev); out_free_ndev: - PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); - IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); netiucv_free_netdevice(dev); return rc; } @@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv, PRINT_WARN("netiucv: %s cannot be removed\n", ndev->name); IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); - return -EBUSY; + return -EPERM; } 
unregister_netdev(ndev); netiucv_unregister_device(dev); return count; } read_unlock_bh(&iucv_connection_rwlock); - PRINT_WARN("netiucv: net device %s unknown\n", name); IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); return -EINVAL; } @@ -2148,7 +2106,6 @@ static int __init netiucv_init(void) netiucv_driver.groups = netiucv_drv_attr_groups; rc = driver_register(&netiucv_driver); if (rc) { - PRINT_ERR("NETIUCV: failed to register driver.\n"); IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); goto out_iucv; } diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a71dae223e..0ac54dc638c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, QETH_DBF_TEXT(TRACE, 3, "urla"); break; default: - PRINT_WARN("Received data is IPA " + QETH_DBF_MESSAGE(2, "Received data is IPA " "but not a reply!\n"); break; } @@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) char *sense; sense = (char *) irb->ecw; - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | @@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (__qeth_check_irb_error(cdev, intparm, irb)) return; - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) @@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } atomic_set(&channel->irq_pending, 0); - if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) + if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) channel->state = CH_STATE_STOPPED; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) channel->state = CH_STATE_HALTED; /*let's wake up immediately on data channel*/ @@ -4092,7 +4092,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) rc = qeth_determine_card_type(card); if (rc) { - PRINT_WARN("%s: not a valid card type\n", __func__); QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto err_card; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 999552c83bb..06deaee50f6 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, else rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, addr->del_flags); - if (rc) { + if (rc) QETH_DBF_TEXT(TRACE, 2, "failed"); - /* TODO: re-activate this warning as soon as we have a - * clean mirco code - qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); - PRINT_WARN("Could not deregister IP address %s (rc=%x)\n", - buf, rc); - */ - } return rc; } diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 8735a415a11..164e090c262 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -156,11 +156,8 @@ static int __init smsg_init(void) if (rc != 0) goto out; rc = iucv_register(&smsg_handler, 1); - if (rc) { - printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); - rc = -EIO; /* better errno ? 
*/ + if (rc) goto out_driver; - } smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); if (!smsg_path) { rc = -ENOMEM; @@ -168,11 +165,8 @@ static int __init smsg_init(void) } rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", NULL, NULL, NULL); - if (rc) { - printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); - rc = -EIO; /* better errno ? */ + if (rc) goto out_free; - } cpcmd("SET SMSG IUCV", NULL, 0, NULL); return 0; diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5bfbe765983..834e9ee7e93 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -2,10 +2,10 @@ * drivers/s390/s390mach.c * S/390 machine check handler * - * S390 version - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2000,2008 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> */ #include <linux/init.h> @@ -18,10 +18,6 @@ #include <asm/etr.h> #include <asm/lowcore.h> #include <asm/cio.h> -#include "cio/cio.h" -#include "cio/chsc.h" -#include "cio/css.h" -#include "cio/chp.h" #include "s390mach.h" static struct semaphore m_sem; @@ -36,13 +32,40 @@ s390_handle_damage(char *msg) for(;;); } +static crw_handler_t crw_handlers[NR_RSCS]; + +/** + * s390_register_crw_handler() - register a channel report word handler + * @rsc: reporting source code to handle + * @handler: handler to be registered + * + * Returns %0 on success and a negative error value otherwise. + */ +int s390_register_crw_handler(int rsc, crw_handler_t handler) +{ + if ((rsc < 0) || (rsc >= NR_RSCS)) + return -EINVAL; + if (!cmpxchg(&crw_handlers[rsc], NULL, handler)) + return 0; + return -EBUSY; +} + +/** + * s390_unregister_crw_handler() - unregister a channel report word handler + * @rsc: reporting source code to handle + */ +void s390_unregister_crw_handler(int rsc) +{ + if ((rsc < 0) || (rsc >= NR_RSCS)) + return; + xchg(&crw_handlers[rsc], NULL); + synchronize_sched(); +} + /* * Retrieve CRWs and call function to handle event. - * - * Note : we currently process CRWs for io and chsc subchannels only */ -static int -s390_collect_crw_info(void *param) +static int s390_collect_crw_info(void *param) { struct crw crw[2]; int ccode; @@ -84,57 +107,24 @@ repeat: crw[chain].rsid); /* Check for overflows. */ if (crw[chain].oflw) { + int i; + pr_debug("%s: crw overflow detected!\n", __func__); - css_schedule_eval_all(); + for (i = 0; i < NR_RSCS; i++) { + if (crw_handlers[i]) + crw_handlers[i](NULL, NULL, 1); + } chain = 0; continue; } - switch (crw[chain].rsc) { - case CRW_RSC_SCH: - if (crw[0].chn && !chain) - break; - pr_debug("source is subchannel %04X\n", crw[0].rsid); - css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); - break; - case CRW_RSC_MONITOR: - pr_debug("source is monitoring facility\n"); - break; - case CRW_RSC_CPATH: - pr_debug("source is channel path %02X\n", crw[0].rsid); - /* - * Check for solicited machine checks. These are - * created by reset channel path and need not be - * reported to the common I/O layer. - */ - if (crw[chain].slct) { - pr_debug("solicited machine check for " - "channel path %02X\n", crw[0].rsid); - break; - } - switch (crw[0].erc) { - case CRW_ERC_IPARM: /* Path has come. */ - chp_process_crw(crw[0].rsid, 1); - break; - case CRW_ERC_PERRI: /* Path has gone. 
*/ - case CRW_ERC_PERRN: - chp_process_crw(crw[0].rsid, 0); - break; - default: - pr_debug("Don't know how to handle erc=%x\n", - crw[0].erc); - } - break; - case CRW_RSC_CONFIG: - pr_debug("source is configuration-alert facility\n"); - break; - case CRW_RSC_CSS: - pr_debug("source is channel subsystem\n"); - chsc_process_crw(); - break; - default: - pr_debug("unknown source\n"); - break; + if (crw[0].chn && !chain) { + chain++; + continue; } + if (crw_handlers[crw[chain].rsc]) + crw_handlers[crw[chain].rsc](&crw[0], + chain ? &crw[1] : NULL, + 0); /* chain is always 0 or 1 here. */ chain = crw[chain].chn ? chain + 1 : 0; } @@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs) etr_sync_check(); if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) etr_switch_to_local(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) + stp_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) + stp_island_check(); } if (mci->se) diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index ca681f9b67f..d39f8b697d2 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h @@ -72,6 +72,13 @@ struct crw { __u32 rsid : 16; /* reporting-source ID */ } __attribute__ ((packed)); +typedef void (*crw_handler_t)(struct crw *, struct crw *, int); + +extern int s390_register_crw_handler(int rsc, crw_handler_t handler); +extern void s390_unregister_crw_handler(int rsc); + +#define NR_RSCS 16 + #define CRW_RSC_MONITOR 0x2 /* monitoring facility */ #define CRW_RSC_SCH 0x3 /* subchannel */ #define CRW_RSC_CPATH 0x4 /* channel path */ @@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw ) #define ED_ETR_SYNC 12 /* External damage ETR sync check */ #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ +#define ED_STP_SYNC 7 /* External damage STP sync check */ +#define ED_STP_ISLAND 6 /* External damage STP island check */ + struct pt_regs; void s390_handle_mcck(void); |
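
Most of the network-driver hunks above follow a single pattern: the subchannel status word in struct irb is now reached through its command-mode part, so irb->scsw.cstat becomes irb->scsw.cmd.cstat, and the same holds for dstat, stctl, fctl, actl, cc, cpa and count. The fragment below is a minimal, illustrative sketch of an interrupt handler written against the new layout; it is not taken from the merge, the function name is made up, and only the fields and flags it tests (SCHN_STAT_PCI, DEV_STAT_UNIT_CHECK, SCSW_STCTL_*) are the ones used in the hunks above.

/*
 * Illustrative sketch only -- not part of the merge above.  It shows the
 * command-mode SCSW accesses (irb->scsw.cmd.*) that the drivers are
 * converted to; the handler itself is hypothetical.
 */
#include <asm/ccwdev.h>
#include <asm/cio.h>

static void sample_irq_handler(struct ccw_device *cdev,
			       unsigned long intparm, struct irb *irb)
{
	int cstat = irb->scsw.cmd.cstat;	/* subchannel status */
	int dstat = irb->scsw.cmd.dstat;	/* device status */

	/* A channel check other than PCI indicates a subchannel problem. */
	if (cstat && !(cstat & SCHN_STAT_PCI))
		return;

	/* Sense data in irb->ecw is only meaningful after a unit check. */
	if (dstat & DEV_STAT_UNIT_CHECK)
		return;

	/*
	 * Final status: secondary status, or status pending with or
	 * without alert status -- the same test the drivers above use.
	 */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
	    (irb->scsw.cmd.stctl ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
		return;		/* the started request reached a final state */
}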
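
The s390mach.c and s390mach.h hunks above replace the hard-coded CRW dispatch with a small registration interface: one handler of type crw_handler_t per reporting-source code (rsc), registered with s390_register_crw_handler() and removed with s390_unregister_crw_handler(); on a CRW overflow every registered handler is called with two NULL pointers and the overflow argument set to 1. Below is a minimal sketch of a possible consumer of that interface. Only the crw_handler_t signature, the register/unregister calls, CRW_RSC_CPATH and the struct crw fields come from the diff above; the handler body, names, include path and module boilerplate are illustrative assumptions.

/*
 * Illustrative sketch only -- one hypothetical user of the CRW handler
 * registration interface added above.  The "s390mach.h" include path is
 * an assumption (it is a driver-private header).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "s390mach.h"

/*
 * crw0 is the first CRW, crw1 the chained CRW (or NULL); overflow != 0
 * means CRWs were lost and both pointers are NULL.
 */
static void sample_cpath_crw_handler(struct crw *crw0, struct crw *crw1,
				     int overflow)
{
	if (overflow) {
		/* events were lost; re-evaluate everything this rsc covers */
		return;
	}
	pr_debug("CRW for channel path %02X, erc %x\n",
		 crw0->rsid, crw0->erc);
}

static int __init sample_init(void)
{
	/* Returns -EBUSY if another handler already owns this rsc. */
	return s390_register_crw_handler(CRW_RSC_CPATH,
					 sample_cpath_crw_handler);
}

static void __exit sample_exit(void)
{
	s390_unregister_crw_handler(CRW_RSC_CPATH);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");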