Diffstat (limited to 'drivers/s390')
35 files changed, 598 insertions, 421 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e5b84db0aa0..3f62dd50bbb 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -470,7 +470,7 @@ static int dasd_decrease_state(struct dasd_device *device) */ static void dasd_change_state(struct dasd_device *device) { - int rc; + int rc; if (device->state == device->target) /* Already where we want to go today... */ @@ -479,8 +479,10 @@ static void dasd_change_state(struct dasd_device *device) rc = dasd_increase_state(device); else rc = dasd_decrease_state(device); - if (rc && rc != -EAGAIN) - device->target = device->state; + if (rc == -EAGAIN) + return; + if (rc) + device->target = device->state; if (device->state == device->target) { wake_up(&dasd_init_waitq); @@ -2133,9 +2135,9 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) struct dasd_device *base; block = bdev->bd_disk->private_data; - base = block->base; if (!block) return -ENODEV; + base = block->base; if (!base->discipline || !base->discipline->fill_geometry) @@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev) if (IS_ERR(device)) return PTR_ERR(device); + /* allow new IO again */ + device->stopped &= ~DASD_STOPPED_PM; + device->stopped &= ~DASD_UNRESUMED_PM; + dasd_schedule_device_bh(device); if (device->block) dasd_schedule_block_bh(device->block); if (device->discipline->restore) rc = device->discipline->restore(device); + if (rc) + /* + * if the resume failed for the DASD we put it in + * an UNRESUMED stop state + */ + device->stopped |= DASD_UNRESUMED_PM; dasd_put_device(device); - return rc; + return 0; } EXPORT_SYMBOL_GPL(dasd_generic_restore_device); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 1c28ec3e4cc..c11770f5b36 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1696,8 +1696,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, DBF_DEV_EVENT(DBF_ERR, device, "%s", "unsolicited interrupt received " "(sense available)"); - device->discipline->dump_sense_dbf(device, NULL, irb, - "unsolicited"); + device->discipline->dump_sense_dbf(device, irb, "unsolicited"); } dasd_schedule_device_bh(device); @@ -2941,42 +2940,20 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) } static void -dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, - struct irb *irb, char *reason) +dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, + char *reason) { u64 *sense; - int sl; - struct tsb *tsb; - sense = NULL; - tsb = NULL; - if (req && scsw_is_tm(&req->irb.scsw)) { - if (irb->scsw.tm.tcw) - tsb = tcw_get_tsb( - (struct tcw *)(unsigned long)irb->scsw.tm.tcw); - if (tsb && (irb->scsw.tm.fcxs == 0x01)) { - switch (tsb->flags & 0x07) { - case 1: /* tsa_iostat */ - sense = (u64 *)tsb->tsa.iostat.sense; - break; - case 2: /* ts_ddpc */ - sense = (u64 *)tsb->tsa.ddpc.sense; - break; - case 3: /* tsa_intrg */ - break; - } - } - } else { - if (irb->esw.esw0.erw.cons) - sense = (u64 *)irb->ecw; - } + sense = (u64 *) dasd_get_sense(irb); if (sense) { - for (sl = 0; sl < 4; sl++) { - DBF_DEV_EVENT(DBF_EMERG, device, - "%s: %016llx %016llx %016llx %016llx", - reason, sense[0], sense[1], sense[2], - sense[3]); - } + DBF_DEV_EVENT(DBF_EMERG, device, + "%s: %s %02x%02x%02x %016llx %016llx %016llx " + "%016llx", reason, + scsw_is_tm(&irb->scsw) ? 
"t" : "c", + scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), + scsw_dstat(&irb->scsw), sense[0], sense[1], + sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); @@ -3243,9 +3220,6 @@ int dasd_eckd_restore_device(struct dasd_device *device) int is_known, rc; struct dasd_uid temp_uid; - /* allow new IO again */ - device->stopped &= ~DASD_STOPPED_PM; - private = (struct dasd_eckd_private *) device->private; /* Read Configuration Data */ @@ -3295,12 +3269,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) return 0; out_err: - /* - * if the resume failed for the DASD we put it in - * an UNRESUMED stop state - */ - device->stopped |= DASD_UNRESUMED_PM; - return 0; + return -1; } static struct ccw_driver dasd_eckd_driver = { diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d970ce2814b..cb8f9cef742 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -172,7 +172,7 @@ dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb) device = cqr->startdev; /* dump sense data to s390 debugfeature*/ if (device->discipline && device->discipline->dump_sense_dbf) - device->discipline->dump_sense_dbf(device, cqr, irb, "log"); + device->discipline->dump_sense_dbf(device, irb, "log"); } EXPORT_SYMBOL(dasd_log_sense_dbf); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index e21ee735f92..31849ad5e59 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -241,7 +241,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, /* check for unsolicited interrupts */ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "unsolicited interrupt received"); - device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited"); + device->discipline->dump_sense_dbf(device, irb, "unsolicited"); dasd_schedule_device_bh(device); return; }; @@ -444,17 +444,20 @@ dasd_fba_fill_info(struct dasd_device * device, } static void -dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, - struct irb *irb, char *reason) +dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb, + char *reason) { - int sl; - if (irb->esw.esw0.erw.cons) { - for (sl = 0; sl < 4; sl++) { - DBF_DEV_EVENT(DBF_EMERG, device, - "%s: %08x %08x %08x %08x", - reason, irb->ecw[8 * 0], irb->ecw[8 * 1], - irb->ecw[8 * 2], irb->ecw[8 * 3]); - } + u64 *sense; + + sense = (u64 *) dasd_get_sense(irb); + if (sense) { + DBF_DEV_EVENT(DBF_EMERG, device, + "%s: %s %02x%02x%02x %016llx %016llx %016llx " + "%016llx", reason, + scsw_is_tm(&irb->scsw) ? 
"t" : "c", + scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), + scsw_dstat(&irb->scsw), sense[0], sense[1], + sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fd63b2f2bda..b699ca356ac 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -284,8 +284,7 @@ struct dasd_discipline { dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, struct irb *); - void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *, - struct irb *, char *); + void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); void (*handle_unsolicited_interrupt) (struct dasd_device *, struct irb *); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 4ce3f72ee1c..df918ef2796 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -16,6 +16,7 @@ #include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> +#include <linux/smp_lock.h> #include <asm/ccwdev.h> #include <asm/cmb.h> diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 016f9e9d259..d34617682a6 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -964,7 +964,8 @@ static int dcssblk_freeze(struct device *dev) break; } if (rc) - pr_err("Suspend failed because device %s is writeable.\n", + pr_err("Suspending the system failed because DCSS device %s " + "is writable\n", dev_info->segment_name); return rc; } @@ -987,8 +988,8 @@ static int dcssblk_restore(struct device *dev) goto out_panic; } if (start != entry->start || end != entry->end) { - pr_err("Mismatch of start / end address after " - "resuming device %s\n", + pr_err("The address range of DCSS %s changed " + "while the system was suspended\n", entry->segment_name); goto out_panic; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 2e9e1ecd6d8..db442cd6621 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -443,7 +443,7 @@ fail: */ static void xpram_resume_error(const char *message) { - pr_err("Resume error: %s\n", message); + pr_err("Resuming the system failed: %s\n", message); panic("xpram resume error\n"); } diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 04dc734805c..21639d6c996 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -20,10 +20,7 @@ #include <linux/interrupt.h> #include <linux/err.h> #include <linux/reboot.h> - #include <linux/slab.h> -#include <linux/bootmem.h> - #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/io.h> @@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev) unsigned long flags; /* Empty the output buffer, then prevent new I/O. */ - raw = cdev->dev.driver_data; + raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); raw->flags |= RAW3215_FROZEN; @@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev) unsigned long flags; /* Allow I/O again and flush output buffer. 
*/ - raw = cdev->dev.driver_data; + raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw->flags &= ~RAW3215_FROZEN; raw->flags |= RAW3215_FLUSHING; @@ -883,7 +880,7 @@ static int __init con3215_init(void) raw3215_freelist = NULL; spin_lock_init(&raw3215_freelist_lock); for (i = 0; i < NR_3215_REQ; i++) { - req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req)); + req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA); req->next = raw3215_freelist; raw3215_freelist = req; } @@ -893,10 +890,9 @@ static int __init con3215_init(void) return -ENODEV; raw3215[0] = raw = (struct raw3215_info *) - alloc_bootmem_low(sizeof(struct raw3215_info)); - memset(raw, 0, sizeof(struct raw3215_info)); - raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); - raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE); + kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA); + raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); + raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); raw->cdev = cdev; dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; @@ -906,9 +902,9 @@ static int __init con3215_init(void) /* Request the console irq */ if (raw3215_startup(raw) != 0) { - free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE); - free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); - free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); + kfree(raw->inbuf); + kfree(raw->buffer); + kfree(raw); raw3215[0] = NULL; return -ENODEV; } diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 44d02e371c0..bb838bdf829 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -7,7 +7,6 @@ * Copyright IBM Corp. 
2003, 2009 */ -#include <linux/bootmem.h> #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -600,16 +599,14 @@ con3270_init(void) if (IS_ERR(rp)) return PTR_ERR(rp); - condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270)); - memset(condev, 0, sizeof(struct con3270)); + condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA); condev->view.dev = rp; - condev->read = raw3270_request_alloc_bootmem(0); + condev->read = raw3270_request_alloc(0); condev->read->callback = con3270_read_callback; condev->read->callback_data = condev; - condev->write = - raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE); - condev->kreset = raw3270_request_alloc_bootmem(1); + condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE); + condev->kreset = raw3270_request_alloc(1); INIT_LIST_HEAD(&condev->lines); INIT_LIST_HEAD(&condev->update); @@ -623,7 +620,7 @@ con3270_init(void) INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) { - cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE); + cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); } condev->cline = alloc_string(&condev->freemem, condev->view.cols); diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 75a8831eebb..3234e90bd7f 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp) goto out_path; } filp->private_data = monpriv; - monreader_device->driver_data = monpriv; + dev_set_drvdata(monreader_device, monpriv); unlock_kernel(); return nonseekable_open(inode, filp); @@ -463,7 +463,7 @@ static struct miscdevice mon_dev = { *****************************************************************************/ static int monreader_freeze(struct device *dev) { - struct mon_private *monpriv = dev->driver_data; + struct mon_private *monpriv = dev_get_drvdata(dev); int rc; if (!monpriv) @@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev) static int monreader_thaw(struct device *dev) { - struct mon_private *monpriv = dev->driver_data; + struct mon_private *monpriv = dev_get_drvdata(dev); int rc; if (!monpriv) diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index acab7b2dfe8..d6a022f55e9 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -7,7 +7,6 @@ * Copyright IBM Corp. 2003, 2009 */ -#include <linux/bootmem.h> #include <linux/module.h> #include <linux/err.h> #include <linux/init.h> @@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size) return rq; } -#ifdef CONFIG_TN3270_CONSOLE -/* - * Allocate a new 3270 ccw request from bootmem. Only works very - * early in the boot process. Only con3270.c should be using this. - */ -struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size) -{ - struct raw3270_request *rq; - - rq = alloc_bootmem_low(sizeof(struct raw3270)); - - /* alloc output buffer. */ - if (size > 0) - rq->buffer = alloc_bootmem_low(size); - rq->size = size; - INIT_LIST_HEAD(&rq->list); - - /* - * Setup ccw. 
- */ - rq->ccw.cda = __pa(rq->buffer); - rq->ccw.flags = CCW_FLAG_SLI; - - return rq; -} -#endif - /* * Free 3270 ccw request */ @@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) char *ascebc; int rc; - rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270)); - ascebc = (char *) alloc_bootmem(256); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); + ascebc = kzalloc(256, GFP_KERNEL); rc = raw3270_setup_device(cdev, rp, ascebc); if (rc) return ERR_PTR(rc); @@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev) struct raw3270_view *view; unsigned long flags; - rp = cdev->dev.driver_data; + rp = dev_get_drvdata(&cdev->dev); if (!rp) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); @@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev) struct raw3270 *rp; unsigned long flags; - rp = cdev->dev.driver_data; + rp = dev_get_drvdata(&cdev->dev); if (!rp) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 336811a7767..ad698d30cb3 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -11,7 +11,6 @@ #include <linux/init.h> #include <linux/timer.h> #include <linux/jiffies.h> -#include <linux/bootmem.h> #include <linux/termios.h> #include <linux/err.h> #include <linux/reboot.h> @@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void) spin_lock_irqsave(&sclp_con_lock, flags); if (timer_pending(&sclp_con_timer)) - del_timer_sync(&sclp_con_timer); + del_timer(&sclp_con_timer); while (sclp_con_queue_running) { spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); @@ -298,8 +297,8 @@ sclp_console_init(void) /* Allocate pages for output buffering */ INIT_LIST_HEAD(&sclp_con_pages); for (i = 0; i < MAX_CONSOLE_PAGES; i++) { - page = alloc_bootmem_low_pages(PAGE_SIZE); - list_add_tail((struct list_head *) page, &sclp_con_pages); + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + list_add_tail(page, &sclp_con_pages); } INIT_LIST_HEAD(&sclp_con_outqueue); spin_lock_init(&sclp_con_lock); diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index 85f491ea929..7a7bfc947d9 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h @@ -92,5 +92,10 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short); void sclp_set_htab(struct sclp_buffer *, unsigned short); int sclp_chars_in_buffer(struct sclp_buffer *); +#ifdef CONFIG_SCLP_CONSOLE void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event); +#else +static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { } +#endif + #endif /* __SCLP_RW_H__ */ diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 5518e24946a..178724f2a4c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -20,7 +20,6 @@ #include <linux/major.h> #include <linux/console.h> #include <linux/kdev_t.h> -#include <linux/bootmem.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/reboot.h> @@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void) list_for_each_safe(page, p, &sclp_vt220_empty) { list_del(page); - if (slab_is_available()) - free_page((unsigned long) page); - else - free_bootmem((unsigned long) page, PAGE_SIZE); + free_page((unsigned long) page); } } @@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages) sclp_vt220_flush_later = 0; /* Allocate pages for output buffering 
*/ + rc = -ENOMEM; for (i = 0; i < num_pages; i++) { - if (slab_is_available()) - page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - else - page = alloc_bootmem_low_pages(PAGE_SIZE); - if (!page) { - rc = -ENOMEM; + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!page) goto out; - } - list_add_tail((struct list_head *) page, &sclp_vt220_empty); + list_add_tail(page, &sclp_vt220_empty); } rc = sclp_register(&sclp_vt220_register); out: diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 595aa04cfd0..1d420d94759 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev) { struct tape_device *device; - device = cdev->dev.driver_data; + device = dev_get_drvdata(&cdev->dev); if (!device) { return -ENODEV; } diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 411cfa3c771..c20a4fe6da5 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = { static int vmlogrdr_pm_prepare(struct device *dev) { int rc; - struct vmlogrdr_priv_t *priv = dev->driver_data; + struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); rc = 0; if (priv) { @@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) dev->bus = &iucv_bus; dev->parent = iucv_root; dev->driver = &vmlogrdr_driver; - dev->driver_data = priv; + dev_set_drvdata(dev, priv); /* * The release function could be called after the * module has been unloaded. It's _only_ task is to diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 7d9e67cb647..31b902e94f7 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd) */ static int ur_pm_suspend(struct ccw_device *cdev) { - struct urdev *urd = cdev->dev.driver_data; + struct urdev *urd = dev_get_drvdata(&cdev->dev); TRACE("ur_pm_suspend: cdev=%p\n", cdev); if (urd->open_flag) { diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index cb7854c10c0..f2bc287b69e 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -250,14 +250,14 @@ static int vmwdt_resume(void) static int vmwdt_suspend(void) { if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { - pr_err("The watchdog is in use. " - "This prevents hibernation or suspend.\n"); + pr_err("The system cannot be suspended while the watchdog" + " is in use\n"); return NOTIFY_BAD; } if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { clear_bit(VMWDT_OPEN, &vmwdt_is_open); - pr_err("The watchdog is running. 
" - "This prevents hibernation or suspend.\n"); + pr_err("The system cannot be suspended while the watchdog" + " is running\n"); return NOTIFY_BAD; } return NOTIFY_DONE; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 3c57c1a18bb..d593bc76afe 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -772,10 +772,8 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) cdev = io_subchannel_allocate_dev(sch); if (!IS_ERR(cdev)) { ret = io_subchannel_initialize_dev(sch, cdev); - if (ret) { - kfree(cdev); + if (ret) cdev = ERR_PTR(ret); - } } return cdev; } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 13bcb811438..b1241f8fae8 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void) ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) /* prototypes for thin interrupt */ -void qdio_sync_after_thinint(struct qdio_q *q); -int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, - int auto_ack); -void qdio_check_outbound_after_thinint(struct qdio_q *q); -int qdio_inbound_q_moved(struct qdio_q *q); -void qdio_kick_handler(struct qdio_q *q); -void qdio_stop_polling(struct qdio_q *q); -int qdio_siga_sync_q(struct qdio_q *q); - void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); @@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev); int qdio_setup_init(void); void qdio_setup_exit(void); +int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state); #endif /* _CIO_QDIO_H */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index e3434b34f86..b8626d4df11 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "slsb buffer states:\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); - qdio_siga_sync_q(q); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { - get_buf_state(q, i, &state, 0); + debug_get_buf_state(q, i, &state); switch (state) { case SLSB_P_INPUT_NOT_INIT: case SLSB_P_OUTPUT_NOT_INIT: diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index d79cf5bf0e6..0038750ad94 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, return i; } -inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, - unsigned char *state, int auto_ack) +static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, int auto_ack) { return get_buf_states(q, bufnr, state, 1, auto_ack); } @@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr) QDIO_MAX_BUFFERS_PER_Q); } -static int qdio_siga_sync(struct qdio_q *q, unsigned int output, +static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, unsigned int input) { int cc; @@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, return cc; } -inline int qdio_siga_sync_q(struct qdio_q *q) +static inline int qdio_siga_sync_q(struct qdio_q *q) { if (q->is_input_q) return qdio_siga_sync(q, 0, q->mask); @@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q) return cc; } -/* called from thinint inbound handler */ -void qdio_sync_after_thinint(struct qdio_q *q) +static inline void 
qdio_sync_after_thinint(struct qdio_q *q) { if (pci_out_supported(q)) { if (need_siga_sync_thinint(q)) @@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q) qdio_siga_sync_q(q); } -inline void qdio_stop_polling(struct qdio_q *q) +int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state) +{ + qdio_siga_sync_q(q); + return get_buf_states(q, bufnr, state, 1, 0); +} + +static inline void qdio_stop_polling(struct qdio_q *q) { if (!q->u.in.polling) return; @@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count) count--; if (!count) return; - - /* - * Need to change all PRIMED buffers to NOT_INIT, otherwise - * we're loosing initiative in the thinint code. - */ - set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, - count); } static int get_inbound_buffer_frontier(struct qdio_q *q) @@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); - /* - * No siga sync here, as a PCI or we after a thin interrupt - * will sync the queues. - */ - - /* need to set count to 1 for non-qebsm */ - if (!is_qebsm(q)) - count = 1; - -check_next: if (q->first_to_check == stop) goto out; + /* + * No siga sync here, as a PCI or we after a thin interrupt + * already sync'ed the queues. + */ count = get_buf_states(q, q->first_to_check, &state, count, 1); if (!count) goto out; @@ -490,14 +483,9 @@ check_next: switch (state) { case SLSB_P_INPUT_PRIMED: inbound_primed(q, count); - /* - * No siga-sync needed for non-qebsm here, as the inbound queue - * will be synced on the next siga-r, resp. - * tiqdio_is_inbound_q_done will do the siga-sync. - */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); - goto check_next; + break; case SLSB_P_INPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ @@ -516,7 +504,7 @@ out: return q->first_to_check; } -int qdio_inbound_q_moved(struct qdio_q *q) +static int qdio_inbound_q_moved(struct qdio_q *q) { int bufnr; @@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q) if ((bufnr != q->last_move) || q->qdio_error) { q->last_move = bufnr; - if (!need_siga_sync(q) && !pci_out_supported(q)) + if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) q->u.in.timestamp = get_usecs(); - - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); return 1; } else return 0; } -static int qdio_inbound_q_done(struct qdio_q *q) +static inline int qdio_inbound_q_done(struct qdio_q *q) { unsigned char state = 0; if (!atomic_read(&q->nr_buf_used)) return 1; - /* - * We need that one for synchronization with the adapter, as it - * does a kind of PCI avoidance. 
- */ qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state, 0); + if (state == SLSB_P_INPUT_PRIMED) - /* we got something to do */ + /* more work coming */ return 0; - /* on VM, we don't poll, so the q is always done here */ - if (need_siga_sync(q) || pci_out_supported(q)) + if (is_thinint_irq(q->irq_ptr)) + return 1; + + /* don't poll under z/VM */ + if (MACHINE_IS_VM) return 1; /* @@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q) DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", q->first_to_check); return 1; - } else { - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", - q->first_to_check); + } else return 0; - } } -void qdio_kick_handler(struct qdio_q *q) +static void qdio_kick_handler(struct qdio_q *q) { int start = q->first_to_kick; int end = q->first_to_check; @@ -619,7 +601,6 @@ again: goto again; } -/* inbound tasklet */ void qdio_inbound_processing(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; @@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); - /* need to set count to 1 for non-qebsm */ - if (!is_qebsm(q)) - count = 1; - -check_next: if (q->first_to_check == stop) return q->first_to_check; @@ -661,13 +637,7 @@ check_next: atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); - /* - * We fetch all buffer states at once. get_buf_states may - * return count < stop. For QEBSM we do not loop. - */ - if (is_qebsm(q)) - break; - goto check_next; + break; case SLSB_P_OUTPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ @@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data) tasklet_schedule(&q->tasklet); } -/* called from thinint inbound tasklet */ -void qdio_check_outbound_after_thinint(struct qdio_q *q) +static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) { struct qdio_q *out; int i; @@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) tasklet_schedule(&out->tasklet); } +static void __tiqdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.thinint_inbound); + qdio_sync_after_thinint(q); + + /* + * The interrupt could be caused by a PCI request. Check the + * PCI capable outbound queues. + */ + qdio_check_outbound_after_thinint(q); + + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_handler(q); + + if (!qdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. 
+ */ + if (!qdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); + } +} + +void tiqdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __tiqdio_inbound_processing(q); +} + static inline void qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) { @@ -1488,18 +1497,13 @@ out: * @count: how many buffers to process */ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, - int q_nr, int bufnr, int count) + int q_nr, unsigned int bufnr, unsigned int count) { struct qdio_irq *irq_ptr; - if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || - (count > QDIO_MAX_BUFFERS_PER_Q) || - (q_nr >= QDIO_MAX_QUEUES_PER_IRQ)) + if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL; - if (!count) - return 0; - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index c655d011a78..981a77ea7ee 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -43,9 +43,6 @@ struct indicator_t { }; static struct indicator_t *q_indicators; -static void tiqdio_tasklet_fn(unsigned long data); -static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0); - static int css_qdio_omit_svs; static inline unsigned long do_clear_global_summary(void) @@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) xchg(irq_ptr->dsci, 1); } -/* - * we cannot stop the tiqdio tasklet here since it is for all - * thinint qdio devices and it must run as long as there is a - * thinint device left - */ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) { struct qdio_q *q; @@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) } } -static inline int tiqdio_inbound_q_done(struct qdio_q *q) -{ - unsigned char state = 0; - - if (!atomic_read(&q->nr_buf_used)) - return 1; - - qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state, 0); - - if (state == SLSB_P_INPUT_PRIMED) - /* more work coming */ - return 0; - return 1; -} - static inline int shared_ind(struct qdio_irq *irq_ptr) { return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; } -static void __tiqdio_inbound_processing(struct qdio_q *q) +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @ind: pointer to adapter local summary indicator + * @drv_data: NULL + */ +static void tiqdio_thinint_handler(void *ind, void *drv_data) { - qdio_perf_stat_inc(&perf_stats.thinint_inbound); - qdio_sync_after_thinint(q); + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.thin_int); /* - * Maybe we have work on our outbound queues... at least - * we have to check the PCI capable queues. + * SVS only when needed: issue SVS to benefit from iqdio interrupt + * avoidance (SVS clears adapter interrupt suppression overwrite) */ - qdio_check_outbound_after_thinint(q); - - if (!qdio_inbound_q_moved(q)) - return; - - qdio_kick_handler(q); - - if (!tiqdio_inbound_q_done(q)) { - qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); - } + if (!css_qdio_omit_svs) + do_clear_global_summary(); - qdio_stop_polling(q); /* - * We need to check again to not lose initiative after - * resetting the ACK state. 
+ * reset local summary indicator (tiqdio_alsi) to stop adapter + * interrupts for now */ - if (!tiqdio_inbound_q_done(q)) { - qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); - } -} - -void tiqdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - - __tiqdio_inbound_processing(q); -} - -/* check for work on all inbound thinint queues */ -static void tiqdio_tasklet_fn(unsigned long data) -{ - struct qdio_q *q; - - qdio_perf_stat_inc(&perf_stats.tasklet_thinint); -again: + xchg((u8 *)ind, 0); /* protect tiq_list entries, only changed in activate or shutdown */ rcu_read_lock(); + /* check for work on all inbound thinint queues */ list_for_each_entry_rcu(q, &tiq_list, entry) /* only process queues from changed sets */ if (*q->irq_ptr->dsci) { @@ -226,37 +178,6 @@ again: if (*tiqdio_alsi) xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); } - - /* check for more work */ - if (*tiqdio_alsi) { - xchg(tiqdio_alsi, 0); - qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop); - goto again; - } -} - -/** - * tiqdio_thinint_handler - thin interrupt handler for qdio - * @ind: pointer to adapter local summary indicator - * @drv_data: NULL - */ -static void tiqdio_thinint_handler(void *ind, void *drv_data) -{ - qdio_perf_stat_inc(&perf_stats.thin_int); - - /* - * SVS only when needed: issue SVS to benefit from iqdio interrupt - * avoidance (SVS clears adapter interrupt suppression overwrite) - */ - if (!css_qdio_omit_svs) - do_clear_global_summary(); - - /* - * reset local summary indicator (tiqdio_alsi) to stop adapter - * interrupts for now, the tasklet will clean all dsci's - */ - xchg((u8 *)ind, 0); - tasklet_hi_schedule(&tiqdio_tasklet); } static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) @@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void) s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); isc_unregister(QDIO_AIRQ_ISC); } - tasklet_kill(&tiqdio_tasklet); } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9c148406b98..ed3dcdea7fe 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -54,6 +54,12 @@ static int ap_poll_thread_start(void); static void ap_poll_thread_stop(void); static void ap_request_timeout(unsigned long); static inline void ap_schedule_poll_timer(void); +static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); +static int ap_device_remove(struct device *dev); +static int ap_device_probe(struct device *dev); +static void ap_interrupt_handler(void *unused1, void *unused2); +static void ap_reset(struct ap_device *ap_dev); +static void ap_config_timeout(unsigned long ptr); /* * Module description. @@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer; * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ static unsigned long long poll_timeout = 250000; +/* Suspend flag */ +static int ap_suspend_flag; +static struct bus_type ap_bus_type; + /** * ap_using_interrupts() - Returns non-zero if interrupt support is * available. @@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) return retval; } +static int ap_bus_suspend(struct device *dev, pm_message_t state) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + unsigned long flags; + + if (!ap_suspend_flag) { + ap_suspend_flag = 1; + + /* Disable scanning for devices, thus we do not want to scan + * for them after removing. 
+ */ + del_timer_sync(&ap_config_timer); + if (ap_work_queue != NULL) { + destroy_workqueue(ap_work_queue); + ap_work_queue = NULL; + } + tasklet_disable(&ap_tasklet); + } + /* Poll on the device until all requests are finished. */ + do { + flags = 0; + __ap_poll_device(ap_dev, &flags); + } while ((flags & 1) || (flags & 2)); + + ap_device_remove(dev); + return 0; +} + +static int ap_bus_resume(struct device *dev) +{ + int rc = 0; + struct ap_device *ap_dev = to_ap_dev(dev); + + if (ap_suspend_flag) { + ap_suspend_flag = 0; + if (!ap_interrupts_available()) + ap_interrupt_indicator = NULL; + ap_device_probe(dev); + ap_reset(ap_dev); + setup_timer(&ap_dev->timeout, ap_request_timeout, + (unsigned long) ap_dev); + ap_scan_bus(NULL); + init_timer(&ap_config_timer); + ap_config_timer.function = ap_config_timeout; + ap_config_timer.data = 0; + ap_config_timer.expires = jiffies + ap_config_time * HZ; + add_timer(&ap_config_timer); + ap_work_queue = create_singlethread_workqueue("kapwork"); + if (!ap_work_queue) + return -ENOMEM; + tasklet_enable(&ap_tasklet); + if (!ap_using_interrupts()) + ap_schedule_poll_timer(); + else + tasklet_schedule(&ap_tasklet); + if (ap_thread_flag) + rc = ap_poll_thread_start(); + } else { + ap_device_probe(dev); + ap_reset(ap_dev); + setup_timer(&ap_dev->timeout, ap_request_timeout, + (unsigned long) ap_dev); + } + + return rc; +} + static struct bus_type ap_bus_type = { .name = "ap", .match = &ap_bus_match, .uevent = &ap_uevent, + .suspend = ap_bus_suspend, + .resume = ap_bus_resume }; static int ap_device_probe(struct device *dev) @@ -1066,12 +1145,17 @@ ap_config_timeout(unsigned long ptr) */ static inline void ap_schedule_poll_timer(void) { - if (ap_using_interrupts()) + ktime_t hr_time; + if (ap_using_interrupts() || ap_suspend_flag) return; if (hrtimer_is_queued(&ap_poll_timer)) return; - hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), - HRTIMER_MODE_ABS); + if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { + hr_time = ktime_set(0, poll_timeout); + hrtimer_forward_now(&ap_poll_timer, hr_time); + hrtimer_restart(&ap_poll_timer); + } + return; } /** @@ -1384,6 +1468,8 @@ static int ap_poll_thread(void *data) set_user_nice(current, 19); while (1) { + if (ap_suspend_flag) + return 0; if (need_resched()) { schedule(); continue; @@ -1414,7 +1500,7 @@ static int ap_poll_thread_start(void) { int rc; - if (ap_using_interrupts()) + if (ap_using_interrupts() || ap_suspend_flag) return 0; mutex_lock(&ap_poll_thread_mutex); if (!ap_poll_kthread) { diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 52574ce797b..8c36eafcfbf 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev) */ static int netiucv_pm_freeze(struct device *dev) { - struct netiucv_priv *priv = dev->driver_data; + struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; @@ -1331,7 +1331,7 @@ out: */ static int netiucv_pm_restore_thaw(struct device *dev) { - struct netiucv_priv *priv = dev->driver_data; + struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 81d7f268418..691cecd03b8 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -655,7 +655,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) for (dm = dev->mc_list; dm; dm = dm->next) qeth_l2_add_mc(card, 
dm->da_addr, 0); - list_for_each_entry(ha, &dev->uc_list, list) + list_for_each_entry(ha, &dev->uc.list, list) qeth_l2_add_mc(card, ha->addr, 1); spin_unlock_bh(&card->mclock); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3ac27ee4739..2ccbd185a5f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -470,6 +470,12 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (!adapter) return -ENOMEM; + adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); + if (!adapter->gs) { + kfree(adapter); + return -ENOMEM; + } + ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); @@ -523,8 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto sysfs_failed; atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - - zfcp_fc_nameserver_init(adapter); + zfcp_fc_wka_ports_init(adapter); if (!zfcp_adapter_scsi_register(adapter)) return 0; @@ -571,6 +576,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); + kfree(adapter->gs); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2074d45dbf6..49d0532bca1 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -22,6 +22,8 @@ #include <linux/syscalls.h> #include <linux/scatterlist.h> #include <linux/ioctl.h> +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_gs.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_cmnd.h> @@ -29,6 +31,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/scsi_bsg_fc.h> #include <asm/ccwdev.h> #include <asm/qdio.h> #include <asm/debug.h> @@ -228,11 +231,6 @@ struct zfcp_ls_adisc { /* FC-PH/FC-GS well-known address identifiers for generic services */ #define ZFCP_DID_WKA 0xFFFFF0 -#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA -#define ZFCP_DID_TIME_SERVICE 0xFFFFFB -#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC -#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8 -#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7 /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 @@ -376,6 +374,14 @@ struct zfcp_wka_port { struct delayed_work work; }; +struct zfcp_wka_ports { + struct zfcp_wka_port ms; /* management service */ + struct zfcp_wka_port ts; /* time service */ + struct zfcp_wka_port ds; /* directory service */ + struct zfcp_wka_port as; /* alias service */ + struct zfcp_wka_port ks; /* key distribution service */ +}; + struct zfcp_qdio_queue { struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; u8 first; /* index of next free bfr in queue */ @@ -461,7 +467,7 @@ struct zfcp_adapter { actions */ u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ - struct zfcp_wka_port nsp; /* adapter's nameserver */ + struct zfcp_wka_ports *gs; /* generic services */ debug_info_t *rec_dbf; debug_info_t *hba_dbf; debug_info_t *san_dbf; /* debug feature areas */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e50ea465bc2..c75d6f35cb5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -553,40 +553,35 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, _zfcp_erp_unit_reopen(unit, clear, id, ref); } -static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) +static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) { - struct zfcp_adapter 
*adapter = act->adapter; - struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; - u32 status = act->status; - - /* initiate follow-up actions depending on success of finished action */ switch (act->action) { - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL); - else - _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL); + _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL); - else - _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL); + _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_PORT: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL); - else - _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL); + _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (status != ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL); + _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); + break; + } +} + +static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) +{ + switch (act->action) { + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); + break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); + break; + case ZFCP_ERP_ACTION_REOPEN_PORT: + _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); break; } } @@ -719,7 +714,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_qdio_close(adapter); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; - zfcp_fc_wka_port_force_offline(&adapter->nsp); + zfcp_fc_wka_port_force_offline(&adapter->gs->ds); /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); @@ -801,7 +796,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) return ZFCP_ERP_FAILED; case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: - if (status & ZFCP_STATUS_PORT_PHYS_OPEN) + if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN)) return ZFCP_ERP_SUCCEEDED; } return ZFCP_ERP_FAILED; @@ -853,11 +848,17 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) gid_pn_work); retval = zfcp_fc_ns_gid_pn(&port->erp_action); - if (retval == -ENOMEM) - zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM); - port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; - if (retval) - zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); + if (!retval) { + port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; + goto out; + } + if (retval == -ENOMEM) { + zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); + goto out; + } + /* all other error condtions */ + zfcp_erp_notify(&port->erp_action, 0); +out: zfcp_port_put(port); } @@ -1289,7 +1290,10 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) retval = zfcp_erp_strategy_statechange(erp_action, retval); if (retval == ZFCP_ERP_EXIT) goto unlock; - zfcp_erp_strategy_followup_actions(erp_action); + if (retval == ZFCP_ERP_SUCCEEDED) + zfcp_erp_strategy_followup_success(erp_action); + if (retval == ZFCP_ERP_FAILED) + zfcp_erp_strategy_followup_failed(erp_action); unlock: write_unlock(&adapter->erp_lock); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 
120a9a1c81f..3044c601030 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -106,8 +106,12 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); -extern void zfcp_fc_nameserver_init(struct zfcp_adapter *); extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); +extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); +extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); +extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); +extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); + /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 35493a82d2a..47daebfa7e5 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -79,11 +79,9 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) mutex_unlock(&wka_port->mutex); - wait_event_timeout( - wka_port->completion_wq, - wka_port->status == ZFCP_WKA_PORT_ONLINE || - wka_port->status == ZFCP_WKA_PORT_OFFLINE, - HZ >> 1); + wait_event(wka_port->completion_wq, + wka_port->status == ZFCP_WKA_PORT_ONLINE || + wka_port->status == ZFCP_WKA_PORT_OFFLINE); if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { atomic_inc(&wka_port->refcount); @@ -120,14 +118,13 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) schedule_delayed_work(&wka_port->work, HZ / 100); } -void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter) +static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, + struct zfcp_adapter *adapter) { - struct zfcp_wka_port *wka_port = &adapter->nsp; - init_waitqueue_head(&wka_port->completion_wq); wka_port->adapter = adapter; - wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE; + wka_port->d_id = d_id; wka_port->status = ZFCP_WKA_PORT_OFFLINE; atomic_set(&wka_port->refcount, 0); @@ -143,6 +140,17 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) mutex_unlock(&wka->mutex); } +void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) +{ + struct zfcp_wka_ports *gs = adapter->gs; + + zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); + zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); + zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); + zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter); + zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter); +} + static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct fcp_rscn_element *elem) { @@ -282,7 +290,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, /* setup parameters for send generic command */ gid_pn->port = erp_action->port; - gid_pn->ct.wka_port = &adapter->nsp; + gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; @@ -329,13 +337,13 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) memset(gid_pn, 0, sizeof(*gid_pn)); - ret = zfcp_wka_port_get(&adapter->nsp); + ret = zfcp_wka_port_get(&adapter->gs->ds); if (ret) goto out; ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); - zfcp_wka_port_put(&adapter->nsp); + zfcp_wka_port_put(&adapter->gs->ds); out: mempool_free(gid_pn, adapter->pool.data_gid_pn); return ret; @@ -525,7 +533,7 @@ static int 
zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, req->fc4_type = ZFCP_CT_SCSI_FCP; /* prepare zfcp_send_ct */ - ct->wka_port = &adapter->nsp; + ct->wka_port = &adapter->gs->ds; ct->handler = zfcp_fc_ns_handler; ct->handler_data = (unsigned long)&compl_rec; ct->timeout = 10; @@ -644,7 +652,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) return 0; - ret = zfcp_wka_port_get(&adapter->nsp); + ret = zfcp_wka_port_get(&adapter->gs->ds); if (ret) return ret; @@ -666,7 +674,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } zfcp_free_sg_env(gpn_ft, buf_num); out: - zfcp_wka_port_put(&adapter->nsp); + zfcp_wka_port_put(&adapter->gs->ds); return ret; } @@ -675,3 +683,158 @@ void _zfcp_scan_ports_later(struct work_struct *work) { zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); } + +struct zfcp_els_fc_job { + struct zfcp_send_els els; + struct fc_bsg_job *job; +}; + +static void zfcp_fc_generic_els_handler(unsigned long data) +{ + struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data; + struct fc_bsg_job *job = els_fc_job->job; + struct fc_bsg_reply *reply = job->reply; + + if (els_fc_job->els.status) { + /* request rejected or timed out */ + reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; + goto out; + } + + reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + reply->reply_payload_rcv_len = job->reply_payload.payload_len; + +out: + job->state_flags = FC_RQST_STATE_DONE; + job->job_done(job); + kfree(els_fc_job); +} + +int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) +{ + struct zfcp_els_fc_job *els_fc_job; + struct fc_rport *rport = job->rport; + struct Scsi_Host *shost; + struct zfcp_adapter *adapter; + struct zfcp_port *port; + u8 *port_did; + + shost = rport ? rport_to_shost(rport) : job->shost; + adapter = (struct zfcp_adapter *)shost->hostdata[0]; + + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) + return -EINVAL; + + els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL); + if (!els_fc_job) + return -ENOMEM; + + els_fc_job->els.adapter = adapter; + if (rport) { + read_lock_irq(&zfcp_data.config_lock); + port = rport->dd_data; + if (port) + els_fc_job->els.d_id = port->d_id; + read_unlock_irq(&zfcp_data.config_lock); + if (!port) { + kfree(els_fc_job); + return -EINVAL; + } + } else { + port_did = job->request->rqst_data.h_els.port_id; + els_fc_job->els.d_id = (port_did[0] << 16) + + (port_did[1] << 8) + port_did[2]; + } + + els_fc_job->els.req = job->request_payload.sg_list; + els_fc_job->els.resp = job->reply_payload.sg_list; + els_fc_job->els.handler = zfcp_fc_generic_els_handler; + els_fc_job->els.handler_data = (unsigned long) els_fc_job; + els_fc_job->job = job; + + return zfcp_fsf_send_els(&els_fc_job->els); +} + +struct zfcp_ct_fc_job { + struct zfcp_send_ct ct; + struct fc_bsg_job *job; +}; + +static void zfcp_fc_generic_ct_handler(unsigned long data) +{ + struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data; + struct fc_bsg_job *job = ct_fc_job->job; + + job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ? 
+ FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; + job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; + job->state_flags = FC_RQST_STATE_DONE; + job->job_done(job); + + zfcp_wka_port_put(ct_fc_job->ct.wka_port); + + kfree(ct_fc_job); +} + +int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) +{ + int ret; + u8 gs_type; + struct fc_rport *rport = job->rport; + struct Scsi_Host *shost; + struct zfcp_adapter *adapter; + struct zfcp_ct_fc_job *ct_fc_job; + u32 preamble_word1; + + shost = rport ? rport_to_shost(rport) : job->shost; + + adapter = (struct zfcp_adapter *)shost->hostdata[0]; + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) + return -EINVAL; + + ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL); + if (!ct_fc_job) + return -ENOMEM; + + preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; + gs_type = (preamble_word1 & 0xff000000) >> 24; + + switch (gs_type) { + case FC_FST_ALIAS: + ct_fc_job->ct.wka_port = &adapter->gs->as; + break; + case FC_FST_MGMT: + ct_fc_job->ct.wka_port = &adapter->gs->ms; + break; + case FC_FST_TIME: + ct_fc_job->ct.wka_port = &adapter->gs->ts; + break; + case FC_FST_DIR: + ct_fc_job->ct.wka_port = &adapter->gs->ds; + break; + default: + kfree(ct_fc_job); + return -EINVAL; /* no such service */ + } + + ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); + if (ret) { + kfree(ct_fc_job); + return ret; + } + + ct_fc_job->ct.req = job->request_payload.sg_list; + ct_fc_job->ct.resp = job->reply_payload.sg_list; + ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT; + ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; + ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; + ct_fc_job->ct.completion = NULL; + ct_fc_job->job = job; + + ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); + if (ret) { + kfree(ct_fc_job); + zfcp_wka_port_put(ct_fc_job->ct.wka_port); + } + return ret; +} diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e6dae3744e7..47795fbf081 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) zfcp_fsf_sbal_check(adapter), 5 * HZ); if (ret > 0) return 0; - if (!ret) + if (!ret) { atomic_inc(&adapter->qdio_outb_full); + /* assume hanging outbound queue, try queue recovery */ + zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); + } spin_lock_bh(&adapter->req_q_lock); return -EIO; @@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, req = zfcp_fsf_alloc_qtcb(pool); if (unlikely(!req)) - return ERR_PTR(-EIO); + return ERR_PTR(-ENOMEM); if (adapter->req_no == 0) adapter->req_no++; @@ -1010,6 +1013,23 @@ skip_fsfstatus: send_ct->handler(send_ct->handler_data); } +static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, + struct scatterlist *sg_req, + struct scatterlist *sg_resp) +{ + sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; + sbale[2].addr = sg_virt(sg_req); + sbale[2].length = sg_req->length; + sbale[3].addr = sg_virt(sg_resp); + sbale[3].length = sg_resp->length; + sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; +} + +static int zfcp_fsf_one_sbal(struct scatterlist *sg) +{ + return sg_is_last(sg) && sg->length <= PAGE_SIZE; +} + static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_req, struct scatterlist *sg_resp, @@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, int bytes; if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { - if 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e6dae3744e7..47795fbf081 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
 			       zfcp_fsf_sbal_check(adapter), 5 * HZ);
 	if (ret > 0)
 		return 0;
-	if (!ret)
+	if (!ret) {
 		atomic_inc(&adapter->qdio_outb_full);
+		/* assume hanging outbound queue, try queue recovery */
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
+	}
 
 	spin_lock_bh(&adapter->req_q_lock);
 	return -EIO;
@@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
 		req = zfcp_fsf_alloc_qtcb(pool);
 
 	if (unlikely(!req))
-		return ERR_PTR(-EIO);
+		return ERR_PTR(-ENOMEM);
 
 	if (adapter->req_no == 0)
 		adapter->req_no++;
@@ -1010,6 +1013,23 @@ skip_fsfstatus:
 	send_ct->handler(send_ct->handler_data);
 }
 
+static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+					    struct scatterlist *sg_req,
+					    struct scatterlist *sg_resp)
+{
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
+	sbale[2].addr = sg_virt(sg_req);
+	sbale[2].length = sg_req->length;
+	sbale[3].addr = sg_virt(sg_resp);
+	sbale[3].length = sg_resp->length;
+	sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+static int zfcp_fsf_one_sbal(struct scatterlist *sg)
+{
+	return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+}
+
 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 				       struct scatterlist *sg_req,
 				       struct scatterlist *sg_resp,
@@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 	int bytes;
 
 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
-		if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
-		    !sg_is_last(sg_req) || !sg_is_last(sg_resp))
+		if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
 			return -EOPNOTSUPP;
 
-		sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-		sbale[2].addr = sg_virt(sg_req);
-		sbale[2].length = sg_req->length;
-		sbale[3].addr = sg_virt(sg_resp);
-		sbale[3].length = sg_resp->length;
-		sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+		return 0;
+	}
+
+	/* use single, unchained SBAL if it can hold the request */
+	if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
+		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
 		return 0;
 	}
 
 	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
-		return -ENOMEM;
+		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
 	req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
 	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	if (bytes <= 0)
-		return -ENOMEM;
+		return -EIO;
 	req->qtcb->bottom.support.resp_buf_length = bytes;
 
 	return 0;
@@ -1146,7 +1166,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 	case FSF_RESPONSE_SIZE_TOO_LARGE:
 		break;
 	case FSF_ACCESS_DENIED:
-		zfcp_fsf_access_denied_port(req, port);
+		if (port)
+			zfcp_fsf_access_denied_port(req, port);
 		break;
 	case FSF_SBAL_MISMATCH:
 		/* should never occure, avoided in zfcp_fsf_send_els */
@@ -1606,10 +1627,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 	case FSF_ACCESS_DENIED:
 		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
 		break;
-	case FSF_PORT_ALREADY_OPEN:
-		break;
 	case FSF_GOOD:
 		wka_port->handle = header->port_handle;
+		/* fall through */
+	case FSF_PORT_ALREADY_OPEN:
 		wka_port->status = ZFCP_WKA_PORT_ONLINE;
 	}
 out:
@@ -1730,15 +1751,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		zfcp_fsf_access_denied_port(req, port);
 		break;
 	case FSF_PORT_BOXED:
-		zfcp_erp_port_boxed(port, "fscpph2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		list_for_each_entry(unit, &port->unit_list_head, list)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
+		zfcp_erp_port_boxed(port, "fscpph2", req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
+
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
@@ -2540,7 +2562,6 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
 					FSF_MAX_SBALS_PER_REQ);
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
-		retval = -ENOMEM;
 		zfcp_fsf_req_free(req);
 		goto out;
 	}
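The SBAL setup rework above reduces the old inline size check to a single predicate: a buffer fits one unchained SBAL entry only if its scatterlist has exactly one element no larger than a page. A standalone sketch of that test with invented types; the page size is hard-coded purely for illustration:

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096	/* s390 page size, illustration only */

/* Simplified stand-in for a scatterlist element. */
struct sg_entry {
	size_t length;
	bool   is_last;		/* no further chained entries */
};

/* Mirrors the idea behind zfcp_fsf_one_sbal(): one unchained SBAL entry
 * can describe the buffer only if it is a single element within a page. */
static bool fits_one_sbal(const struct sg_entry *sg)
{
	return sg->is_last && sg->length <= SKETCH_PAGE_SIZE;
}

int main(void)
{
	struct sg_entry sg = { .length = 2048, .is_last = true };
	return fits_one_sbal(&sg) ? 0 : 1;
}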
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7d0da230eb6..6925a178468 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -167,20 +167,21 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_fsf_req *old_req, *abrt_req;
 	unsigned long flags;
-	unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
+	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
 	int retval = SUCCESS;
 	int retry = 3;
+	char *dbf_tag;
 
 	/* avoid race condition between late normal completion and abort */
 	write_lock_irqsave(&adapter->abort_lock, flags);
 	spin_lock(&adapter->req_list_lock);
-	old_req = zfcp_reqlist_find(adapter, old_req_id);
+	old_req = zfcp_reqlist_find(adapter, old_reqid);
 	spin_unlock(&adapter->req_list_lock);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
 		zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
-					  old_req_id);
+					  old_reqid);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -189,7 +190,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
 
 	while (retry--) {
-		abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit);
+		abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
 		if (abrt_req)
 			break;
 
@@ -197,7 +198,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 			zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
-						  old_req_id);
+						  old_reqid);
 			return SUCCESS;
 		}
 	}
@@ -208,13 +209,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 		       abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
-		zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0);
+		dbf_tag = "okay";
 	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
-		zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0);
+		dbf_tag = "lte2";
 	else {
-		zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0);
+		dbf_tag = "fail";
 		retval = FAILED;
 	}
+	zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
@@ -534,6 +536,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
 	struct fc_rport_identifiers ids;
 	struct fc_rport *rport;
 
+	if (port->rport)
+		return;
+
 	ids.node_name = port->wwnn;
 	ids.port_name = port->wwpn;
 	ids.port_id = port->d_id;
@@ -557,8 +562,10 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
 {
 	struct fc_rport *rport = port->rport;
 
-	if (rport)
+	if (rport) {
 		fc_remote_port_delete(rport);
+		port->rport = NULL;
+	}
 }
 
 void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
@@ -623,6 +630,20 @@ void zfcp_scsi_scan(struct work_struct *work)
 	zfcp_unit_put(unit);
 }
 
+static int zfcp_execute_fc_job(struct fc_bsg_job *job)
+{
+	switch (job->request->msgcode) {
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_ELS_NOLOGIN:
+		return zfcp_fc_execute_els_fc_job(job);
+	case FC_BSG_RPT_CT:
+	case FC_BSG_HST_CT:
+		return zfcp_fc_execute_ct_fc_job(job);
+	default:
+		return -EINVAL;
+	}
+}
+
 struct fc_function_template zfcp_transport_functions = {
 	.show_starget_port_id = 1,
 	.show_starget_port_name = 1,
@@ -644,6 +665,7 @@ struct fc_function_template zfcp_transport_functions = {
 	.dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
 	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
 	.show_host_port_state = 1,
+	.bsg_request = zfcp_execute_fc_job,
 	/* no functions registered for following dynamic attributes but
 	   directly set by LLDD */
 	.show_host_port_type = 1,
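The rport handling above becomes idempotent: registration bails out if port->rport is already set, and blocking clears the pointer after deleting the rport so a later registration starts fresh. A standalone userspace sketch of that pairing with invented types; malloc/free stand in for the FC transport calls:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: a remote-port record that may or may not hold a handle. */
struct remote_port { void *handle; };

static void port_register(struct remote_port *p)
{
	if (p->handle)			/* already registered: nothing to do */
		return;
	p->handle = malloc(1);		/* stands in for fc_remote_port_add() */
}

static void port_block(struct remote_port *p)
{
	if (p->handle) {
		free(p->handle);	/* stands in for fc_remote_port_delete() */
		p->handle = NULL;	/* forget it so the next register re-adds */
	}
}

int main(void)
{
	struct remote_port p = { NULL };

	port_register(&p);
	port_register(&p);		/* second call is a no-op */
	port_block(&p);
	port_block(&p);			/* safe: handle already cleared */
	printf("handle is %s\n", p.handle ? "set" : "clear");
	return 0;
}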
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3e51e64d110..0fe5cce818c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -494,9 +494,14 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
 	struct Scsi_Host *scsi_host = class_to_shost(dev);
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *) scsi_host->hostdata[0];
+	u64 util;
+
+	spin_lock_bh(&adapter->qdio_stat_lock);
+	util = adapter->req_q_util;
+	spin_unlock_bh(&adapter->qdio_stat_lock);
 
 	return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
-		       (unsigned long long)adapter->req_q_util);
+		       (unsigned long long)util);
 }
 static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
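The sysfs read above copies the 64-bit req_q_util counter under qdio_stat_lock and formats the snapshot outside the lock. A standalone userspace sketch of the same pattern, using POSIX threads in place of the kernel's spin_lock_bh and invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

/* Sketch only: copy the counter under the lock, format the copy outside
 * it, so a reader never sees a torn value and the lock is held briefly. */
static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t req_q_util;

static int show_q_full(char *buf, size_t len, int outb_full)
{
	uint64_t util;

	pthread_mutex_lock(&stat_lock);
	util = req_q_util;
	pthread_mutex_unlock(&stat_lock);

	return snprintf(buf, len, "%d %llu\n", outb_full,
			(unsigned long long)util);
}

int main(void)
{
	char buf[32];

	show_q_full(buf, sizeof(buf), 0);
	fputs(buf, stdout);
	return 0;
}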