Diffstat (limited to 'drivers')
220 files changed, 5748 insertions, 3205 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 7edf6d913c1..765fd1c56cd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -688,14 +688,6 @@ void __init acpi_early_init(void) if (acpi_disabled) return; - /* - * ACPI CA initializes acpi_dbg_level to non-zero, which means - * we get debug output merely by turning on CONFIG_ACPI_DEBUG. - * Turn it off so we don't get output unless the user specifies - * acpi.debug_level. - */ - acpi_dbg_level = 0; - printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); /* enable workarounds, unless strict ACPI spec. compliance */ diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 11acaee14d6..bf79d83bdfb 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry, return irq; } +#ifdef CONFIG_X86_IO_APIC +extern int noioapicquirk; + +static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) +{ + struct pci_bus *bus_it; + + for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { + if (!bus_it->self) + return 0; + + printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor, + bus_it->self->device); + + if (bus_it->self->irq_reroute_variant) + return bus_it->self->irq_reroute_variant; + } + return 0; +} +#endif /* CONFIG_X86_IO_APIC */ + /* * acpi_pci_irq_lookup * success: return IRQ >= 0 @@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus, } ret = func(entry, triggering, polarity, link); + +#ifdef CONFIG_X86_IO_APIC + /* + * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the + * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel + * does during interrupt handling). When this INTx generation cannot be + * disabled, we reroute these interrupts to their legacy equivalent to + * get rid of spurious interrupts. + */ + if (!noioapicquirk) { + switch (bridge_has_boot_interrupt_variant(bus)) { + case 0: + /* no rerouting necessary */ + break; + + case INTEL_IRQ_REROUTE_VARIANT: + /* + * Remap according to INTx routing table in 6700PXH + * specs, intel order number 302628-002, section + * 2.15.2. Other chipsets (80332, ...) have the same + * mapping and are handled here as well. + */ + printk(KERN_INFO "pci irq %d -> rerouted to legacy " + "irq %d\n", ret, (ret % 4) + 16); + ret = (ret % 4) + 16; + break; + + default: + printk(KERN_INFO "not rerouting irq %d to legacy irq: " + "unknown mapping\n", ret); + break; + } + } +#endif /* CONFIG_X86_IO_APIC */ + return ret; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5f8d746a9b8..38aca048e95 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -374,15 +374,15 @@ static int tsc_halts_in_c(int state) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: + case X86_VENDOR_INTEL: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. 
*/ - if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) return 0; + /*FALL THROUGH*/ - case X86_VENDOR_INTEL: - /* Several cases known where TSC halts in C2 too */ default: return state > ACPI_STATE_C1; } diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c index 670551b95e5..17ed5ac840f 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/utilities/utglobal.c @@ -64,7 +64,7 @@ u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; /* Debug switch - layer (component) mask */ -u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; +u32 acpi_dbg_layer = 0; u32 acpi_gbl_nesting_level = 0; /* Debugger globals */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5e2eb740df4..bc6695e3c84 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4050,17 +4050,70 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, /* Seagate NCQ + FLUSH CACHE firmware bug */ - { "ST31500341AS", "9JU138", ATA_HORKAGE_NONCQ | + { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - { "ST31000333AS", "9FZ136", ATA_HORKAGE_NONCQ | + { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - { "ST3640623AS", "9FZ164", ATA_HORKAGE_NONCQ | + { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - { "ST3640323AS", "9FZ134", ATA_HORKAGE_NONCQ | + { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - { "ST3320813AS", "9FZ182", ATA_HORKAGE_NONCQ | + { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - { "ST3320613AS", "9FZ162", ATA_HORKAGE_NONCQ | + + { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", 
"SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, /* Blacklist entries taken from Silicon Image 3124/3132 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index a098ba8eaab..e0c4f05d7d5 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -183,7 +183,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) mask &= ~(0xF8 << ATA_SHIFT_UDMA); if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) mask &= ~(0xF0 << ATA_SHIFT_UDMA); - } + } else if (adev->class == ATA_DEV_ATAPI) + mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); + return ata_bmdma_mode_filter(adev, mask); } @@ -211,11 +213,15 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed) static int hpt36x_cable_detect(struct ata_port *ap) { - u8 ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 ata66; + /* + * Each channel of pata_hpt366 occupies separate PCI function + * as the primary channel and bit1 indicates the cable type. + */ pci_read_config_byte(pdev, 0x5A, &ata66); - if (ata66 & (1 << ap->port_no)) + if (ata66 & 2) return ATA_CBL_PATA40; return ATA_CBL_PATA80; } diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9364dc55425..9f7c543cc04 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1693,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) for (i = 0; i <= h->highest_lun; i++) { int j; drv_found = 0; + + /* skip holes in the array from already deleted drives */ + if (h->drv[i].raid_level == -1) + continue; + for (j = 0; j < num_luns; j++) { memcpy(&lunid, &ld_buff->LUN[j][0], 4); lunid = le32_to_cpu(lunid); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5c4ee70d5cf..fb06ed65921 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -936,8 +936,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; struct loop_func_table *xfer; + uid_t uid = current_uid(); - if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid && + if (lo->lo_encrypt_key_size && + lo->lo_key_owner != uid && !capable(CAP_SYS_ADMIN)) return -EPERM; if (lo->lo_state != Lo_bound) @@ -992,7 +994,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (info->lo_encrypt_key_size) { memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, info->lo_encrypt_key_size); - lo->lo_key_owner = current->uid; + lo->lo_key_owner = uid; } return 0; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 43d6ba83a19..8783457b93d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -622,6 +622,16 @@ config HVC_BEAT help Toshiba's Cell Reference Set Beat Console device driver +config HVC_IUCV + bool "z/VM IUCV Hypervisor console support (VM only)" + depends on S390 + select HVC_DRIVER + select IUCV + default y + help + This driver provides a Hypervisor console (HVC) back-end to access + a Linux (console) terminal via a z/VM IUCV communication path. 
+ config HVC_XEN bool "Xen Hypervisor Console support" depends on XEN diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 438f71317c5..36151bae0d7 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o obj-$(CONFIG_HVC_DRIVER) += hvc_console.o obj-$(CONFIG_HVC_IRQ) += hvc_irq.o obj-$(CONFIG_HVC_XEN) += hvc_xen.o +obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o obj-$(CONFIG_RAW_DRIVER) += raw.o obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c new file mode 100644 index 00000000000..5ea7d7713fc --- /dev/null +++ b/drivers/char/hvc_iucv.c @@ -0,0 +1,850 @@ +/* + * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC) + * + * This back-end for HVC provides terminal access via + * z/VM IUCV communication paths. + * + * Copyright IBM Corp. 2008. + * + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ +#define KMSG_COMPONENT "hvc_iucv" + +#include <linux/types.h> +#include <asm/ebcdic.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/tty.h> +#include <net/iucv/iucv.h> + +#include "hvc_console.h" + + +/* HVC backend for z/VM IUCV */ +#define HVC_IUCV_MAGIC 0xc9e4c3e5 +#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS +#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) + +/* IUCV TTY message */ +#define MSG_VERSION 0x02 /* Message version */ +#define MSG_TYPE_ERROR 0x01 /* Error message */ +#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */ +#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */ +#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ +#define MSG_TYPE_DATA 0x10 /* Terminal data */ + +#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) +struct iucv_tty_msg { + u8 version; /* Message version */ + u8 type; /* Message type */ +#define MSG_MAX_DATALEN (~(u16)0) + u16 datalen; /* Payload length */ + u8 data[]; /* Payload buffer */ +} __attribute__((packed)); + +enum iucv_state_t { + IUCV_DISCONN = 0, + IUCV_CONNECTED = 1, + IUCV_SEVERED = 2, +}; + +enum tty_state_t { + TTY_CLOSED = 0, + TTY_OPENED = 1, +}; + +struct hvc_iucv_private { + struct hvc_struct *hvc; /* HVC console struct reference */ + u8 srv_name[8]; /* IUCV service name (ebcdic) */ + enum iucv_state_t iucv_state; /* IUCV connection status */ + enum tty_state_t tty_state; /* TTY status */ + struct iucv_path *path; /* IUCV path pointer */ + spinlock_t lock; /* hvc_iucv_private lock */ + struct list_head tty_outqueue; /* outgoing IUCV messages */ + struct list_head tty_inqueue; /* incoming IUCV messages */ +}; + +struct iucv_tty_buffer { + struct list_head list; /* list pointer */ + struct iucv_message msg; /* store an incoming IUCV message */ + size_t offset; /* data buffer offset */ + struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ +}; + +/* IUCV callback handler */ +static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]); +static void hvc_iucv_path_severed(struct iucv_path *, u8[16]); +static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); +static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); + + +/* Kernel module parameters */ +static unsigned long hvc_iucv_devices; + +/* Array of allocated hvc iucv tty lines... 
*/ +static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; + +/* Kmem cache and mempool for iucv_tty_buffer elements */ +static struct kmem_cache *hvc_iucv_buffer_cache; +static mempool_t *hvc_iucv_mempool; + +/* IUCV handler callback functions */ +static struct iucv_handler hvc_iucv_handler = { + .path_pending = hvc_iucv_path_pending, + .path_severed = hvc_iucv_path_severed, + .message_complete = hvc_iucv_msg_complete, + .message_pending = hvc_iucv_msg_pending, +}; + + +/** + * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance. + * @num: The HVC virtual terminal number (vtermno) + * + * This function returns the struct hvc_iucv_private instance that corresponds + * to the HVC virtual terminal number specified as parameter @num. + */ +struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) +{ + if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices)) + return NULL; + return hvc_iucv_table[num - HVC_IUCV_MAGIC]; +} + +/** + * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element. + * @size: Size of the internal buffer used to store data. + * @flags: Memory allocation flags passed to mempool. + * + * This function allocates a new struct iucv_tty_buffer element and, optionally, + * allocates an internal data buffer with the specified size @size. + * Note: The total message size arises from the internal buffer size and the + * members of the iucv_tty_msg structure. + * + * The function returns NULL if memory allocation has failed. + */ +static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) +{ + struct iucv_tty_buffer *bufp; + + bufp = mempool_alloc(hvc_iucv_mempool, flags); + if (!bufp) + return NULL; + memset(bufp, 0, sizeof(struct iucv_tty_buffer)); + + if (size > 0) { + bufp->msg.length = MSG_SIZE(size); + bufp->mbuf = kmalloc(bufp->msg.length, flags); + if (!bufp->mbuf) { + mempool_free(bufp, hvc_iucv_mempool); + return NULL; + } + bufp->mbuf->version = MSG_VERSION; + bufp->mbuf->type = MSG_TYPE_DATA; + bufp->mbuf->datalen = (u16) size; + } + return bufp; +} + +/** + * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. + * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. + * + * The destroy_tty_buffer() function frees the internal data buffer and returns + * the struct iucv_tty_buffer element back to the mempool for freeing. + */ +static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) +{ + kfree(bufp->mbuf); + mempool_free(bufp, hvc_iucv_mempool); +} + +/** + * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. + * @list: List head pointer to a list containing struct iucv_tty_buffer + * elements. + * + * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the + * list @list. + */ +static void destroy_tty_buffer_list(struct list_head *list) +{ + struct iucv_tty_buffer *ent, *next; + + list_for_each_entry_safe(ent, next, list, list) { + list_del(&ent->list); + destroy_tty_buffer(ent); + } +} + +/** + * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer. + * @priv: Pointer to hvc_iucv_private structure. + * @buf: HVC console buffer for writing received terminal data. + * @count: HVC console buffer size. + * @has_more_data: Pointer to an int variable. + * + * The function picks up pending messages from the input queue and receives + * the message data that is then written to the specified buffer @buf. 
+ * If the buffer size @count is less than the data message size, then the + * message is kept on the input queue and @has_more_data is set to 1. + * If the message data has been entirely written, the message is removed from + * the input queue. + * + * The function returns the number of bytes written to the terminal, zero if + * there are no pending data messages available or if there is no established + * IUCV path. + * If the IUCV path has been severed, then -EPIPE is returned to cause a + * hang up (that is issued by the HVC console layer). + */ +static int hvc_iucv_write(struct hvc_iucv_private *priv, + char *buf, int count, int *has_more_data) +{ + struct iucv_tty_buffer *rb; + int written; + int rc; + + /* Immediately return if there is no IUCV connection */ + if (priv->iucv_state == IUCV_DISCONN) + return 0; + + /* If the IUCV path has been severed, return -EPIPE to inform the + * hvc console layer to hang up the tty device. */ + if (priv->iucv_state == IUCV_SEVERED) + return -EPIPE; + + /* check if there are pending messages */ + if (list_empty(&priv->tty_inqueue)) + return 0; + + /* receive a iucv message and flip data to the tty (ldisc) */ + rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); + + written = 0; + if (!rb->mbuf) { /* message not yet received ... */ + /* allocate mem to store msg data; if no memory is available + * then leave the buffer on the list and re-try later */ + rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC); + if (!rb->mbuf) + return -ENOMEM; + + rc = __iucv_message_receive(priv->path, &rb->msg, 0, + rb->mbuf, rb->msg.length, NULL); + switch (rc) { + case 0: /* Successful */ + break; + case 2: /* No message found */ + case 9: /* Message purged */ + break; + default: + written = -EIO; + } + /* remove buffer if an error has occured or received data + * is not correct */ + if (rc || (rb->mbuf->version != MSG_VERSION) || + (rb->msg.length != MSG_SIZE(rb->mbuf->datalen))) + goto out_remove_buffer; + } + + switch (rb->mbuf->type) { + case MSG_TYPE_DATA: + written = min_t(int, rb->mbuf->datalen - rb->offset, count); + memcpy(buf, rb->mbuf->data + rb->offset, written); + if (written < (rb->mbuf->datalen - rb->offset)) { + rb->offset += written; + *has_more_data = 1; + goto out_written; + } + break; + + case MSG_TYPE_WINSIZE: + if (rb->mbuf->datalen != sizeof(struct winsize)) + break; + hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data)); + break; + + case MSG_TYPE_ERROR: /* ignored ... */ + case MSG_TYPE_TERMENV: /* ignored ... */ + case MSG_TYPE_TERMIOS: /* ignored ... */ + break; + } + +out_remove_buffer: + list_del(&rb->list); + destroy_tty_buffer(rb); + *has_more_data = !list_empty(&priv->tty_inqueue); + +out_written: + return written; +} + +/** + * hvc_iucv_get_chars() - HVC get_chars operation. + * @vtermno: HVC virtual terminal number. + * @buf: Pointer to a buffer to store data + * @count: Size of buffer available for writing + * + * The hvc_console thread calls this method to read characters from + * the terminal backend. If an IUCV communication path has been established, + * pending IUCV messages are received and data is copied into buffer @buf + * up to @count bytes. + * + * Locking: The routine gets called under an irqsave() spinlock; and + * the routine locks the struct hvc_iucv_private->lock to call + * helper functions. 
+ */ +static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) +{ + struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); + int written; + int has_more_data; + + if (count <= 0) + return 0; + + if (!priv) + return -ENODEV; + + spin_lock(&priv->lock); + has_more_data = 0; + written = hvc_iucv_write(priv, buf, count, &has_more_data); + spin_unlock(&priv->lock); + + /* if there are still messages on the queue... schedule another run */ + if (has_more_data) + hvc_kick(); + + return written; +} + +/** + * hvc_iucv_send() - Send an IUCV message containing terminal data. + * @priv: Pointer to struct hvc_iucv_private instance. + * @buf: Buffer containing data to send. + * @size: Size of buffer and amount of data to send. + * + * If an IUCV communication path is established, the function copies the buffer + * data to a newly allocated struct iucv_tty_buffer element, sends the data and + * puts the element to the outqueue. + * + * If there is no IUCV communication path established, the function returns 0. + * If an existing IUCV communicaton path has been severed, the function returns + * -EPIPE (can be passed to HVC layer to cause a tty hangup). + */ +static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, + int count) +{ + struct iucv_tty_buffer *sb; + int rc; + u16 len; + + if (priv->iucv_state == IUCV_SEVERED) + return -EPIPE; + + if (priv->iucv_state == IUCV_DISCONN) + return 0; + + len = min_t(u16, MSG_MAX_DATALEN, count); + + /* allocate internal buffer to store msg data and also compute total + * message length */ + sb = alloc_tty_buffer(len, GFP_ATOMIC); + if (!sb) + return -ENOMEM; + + sb->mbuf->datalen = len; + memcpy(sb->mbuf->data, buf, len); + + list_add_tail(&sb->list, &priv->tty_outqueue); + + rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, + (void *) sb->mbuf, sb->msg.length); + if (rc) { + list_del(&sb->list); + destroy_tty_buffer(sb); + len = 0; + } + + return len; +} + +/** + * hvc_iucv_put_chars() - HVC put_chars operation. + * @vtermno: HVC virtual terminal number. + * @buf: Pointer to an buffer to read data from + * @count: Size of buffer available for reading + * + * The hvc_console thread calls this method to write characters from + * to the terminal backend. + * The function calls hvc_iucv_send() under the lock of the + * struct hvc_iucv_private instance that corresponds to the tty @vtermno. + * + * Locking: The method gets called under an irqsave() spinlock; and + * locks struct hvc_iucv_private->lock. + */ +static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) +{ + struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); + int sent; + + if (count <= 0) + return 0; + + if (!priv) + return -ENODEV; + + spin_lock(&priv->lock); + sent = hvc_iucv_send(priv, buf, count); + spin_unlock(&priv->lock); + + return sent; +} + +/** + * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time. + * @hp: Pointer to the HVC device (struct hvc_struct) + * @id: Additional data (originally passed to hvc_alloc): the index of an struct + * hvc_iucv_private instance. + * + * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private + * instance that is derived from @id. Always returns 0. 
+ * + * Locking: struct hvc_iucv_private->lock, spin_lock_bh + */ +static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) +{ + struct hvc_iucv_private *priv; + + priv = hvc_iucv_get_private(id); + if (!priv) + return 0; + + spin_lock_bh(&priv->lock); + priv->tty_state = TTY_OPENED; + spin_unlock_bh(&priv->lock); + + return 0; +} + +/** + * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed. + * @priv: Pointer to the struct hvc_iucv_private instance. + * + * The functions severs the established IUCV communication path (if any), and + * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally, + * the functions resets the states to TTY_CLOSED and IUCV_DISCONN. + */ +static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) +{ + destroy_tty_buffer_list(&priv->tty_outqueue); + destroy_tty_buffer_list(&priv->tty_inqueue); + + priv->tty_state = TTY_CLOSED; + priv->iucv_state = IUCV_DISCONN; +} + +/** + * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups. + * @hp: Pointer to the HVC device (struct hvc_struct) + * @id: Additional data (originally passed to hvc_alloc): the index of an struct + * hvc_iucv_private instance. + * + * This routine notifies the HVC backend that a tty hangup (carrier loss, + * virtual or otherwise) has occured. + * + * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep + * an existing IUCV communication path established. + * (Background: vhangup() is called from user space (by getty or login) to + * disable writing to the tty by other applications). + * + * If the tty has been opened (e.g. getty) and an established IUCV path has been + * severed (we caused the tty hangup in that case), then the functions invokes + * hvc_iucv_cleanup() to clean up. + * + * Locking: struct hvc_iucv_private->lock + */ +static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id) +{ + struct hvc_iucv_private *priv; + + priv = hvc_iucv_get_private(id); + if (!priv) + return; + + spin_lock_bh(&priv->lock); + /* NOTE: If the hangup was scheduled by ourself (from the iucv + * path_servered callback [IUCV_SEVERED]), then we have to + * finally clean up the tty backend structure and set state to + * TTY_CLOSED. + * + * If the tty was hung up otherwise (e.g. vhangup()), then we + * ignore this hangup and keep an established IUCV path open... + * (...the reason is that we are not able to connect back to the + * client if we disconnect on hang up) */ + priv->tty_state = TTY_CLOSED; + + if (priv->iucv_state == IUCV_SEVERED) + hvc_iucv_cleanup(priv); + spin_unlock_bh(&priv->lock); +} + +/** + * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time. + * @hp: Pointer to the HVC device (struct hvc_struct) + * @id: Additional data (originally passed to hvc_alloc): + * the index of an struct hvc_iucv_private instance. + * + * This routine notifies the HVC backend that the last tty device file + * descriptor has been closed. + * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private + * instance. 
+ * + * Locking: struct hvc_iucv_private->lock + */ +static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id) +{ + struct hvc_iucv_private *priv; + struct iucv_path *path; + + priv = hvc_iucv_get_private(id); + if (!priv) + return; + + spin_lock_bh(&priv->lock); + path = priv->path; /* save reference to IUCV path */ + priv->path = NULL; + hvc_iucv_cleanup(priv); + spin_unlock_bh(&priv->lock); + + /* sever IUCV path outside of priv->lock due to lock ordering of: + * priv->lock <--> iucv_table_lock */ + if (path) { + iucv_path_sever(path, NULL); + iucv_path_free(path); + } +} + +/** + * hvc_iucv_path_pending() - IUCV handler to process a connection request. + * @path: Pending path (struct iucv_path) + * @ipvmid: Originator z/VM system identifier + * @ipuser: User specified data for this path + * (AF_IUCV: port/service name and originator port) + * + * The function uses the @ipuser data to check to determine if the pending + * path belongs to a terminal managed by this HVC backend. + * If the check is successful, then an additional check is done to ensure + * that a terminal cannot be accessed multiple times (only one connection + * to a terminal is allowed). In that particular case, the pending path is + * severed. If it is the first connection, the pending path is accepted and + * associated to the struct hvc_iucv_private. The iucv state is updated to + * reflect that a communication path has been established. + * + * Returns 0 if the path belongs to a terminal managed by the this HVC backend; + * otherwise returns -ENODEV in order to dispatch this path to other handlers. + * + * Locking: struct hvc_iucv_private->lock + */ +static int hvc_iucv_path_pending(struct iucv_path *path, + u8 ipvmid[8], u8 ipuser[16]) +{ + struct hvc_iucv_private *priv; + u8 nuser_data[16]; + int i, rc; + + priv = NULL; + for (i = 0; i < hvc_iucv_devices; i++) + if (hvc_iucv_table[i] && + (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) { + priv = hvc_iucv_table[i]; + break; + } + + if (!priv) + return -ENODEV; + + spin_lock(&priv->lock); + + /* If the terminal is already connected or being severed, then sever + * this path to enforce that there is only ONE established communication + * path per terminal. */ + if (priv->iucv_state != IUCV_DISCONN) { + iucv_path_sever(path, ipuser); + iucv_path_free(path); + goto out_path_handled; + } + + /* accept path */ + memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */ + memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */ + path->msglim = 0xffff; /* IUCV MSGLIMIT */ + path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */ + rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv); + if (rc) { + iucv_path_sever(path, ipuser); + iucv_path_free(path); + goto out_path_handled; + } + priv->path = path; + priv->iucv_state = IUCV_CONNECTED; + +out_path_handled: + spin_unlock(&priv->lock); + return 0; +} + +/** + * hvc_iucv_path_severed() - IUCV handler to process a path sever. + * @path: Pending path (struct iucv_path) + * @ipuser: User specified data for this path + * (AF_IUCV: port/service name and originator port) + * + * The function also severs the path (as required by the IUCV protocol) and + * sets the iucv state to IUCV_SEVERED for the associated struct + * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty + * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). + * + * If tty portion of the HVC is closed then clean up the outqueue in addition. 
+ * + * Locking: struct hvc_iucv_private->lock + */ +static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) +{ + struct hvc_iucv_private *priv = path->private; + + spin_lock(&priv->lock); + priv->iucv_state = IUCV_SEVERED; + + /* NOTE: If the tty has not yet been opened by a getty program + * (e.g. to see console messages), then cleanup the + * hvc_iucv_private structure to allow re-connects. + * + * If the tty has been opened, the get_chars() callback returns + * -EPIPE to signal the hvc console layer to hang up the tty. */ + priv->path = NULL; + if (priv->tty_state == TTY_CLOSED) + hvc_iucv_cleanup(priv); + spin_unlock(&priv->lock); + + /* finally sever path (outside of priv->lock due to lock ordering) */ + iucv_path_sever(path, ipuser); + iucv_path_free(path); +} + +/** + * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message. + * @path: Pending path (struct iucv_path) + * @msg: Pointer to the IUCV message + * + * The function stores an incoming message on the input queue for later + * processing (by hvc_iucv_get_chars() / hvc_iucv_write()). + * However, if the tty has not yet been opened, the message is rejected. + * + * Locking: struct hvc_iucv_private->lock + */ +static void hvc_iucv_msg_pending(struct iucv_path *path, + struct iucv_message *msg) +{ + struct hvc_iucv_private *priv = path->private; + struct iucv_tty_buffer *rb; + + spin_lock(&priv->lock); + + /* reject messages if tty has not yet been opened */ + if (priv->tty_state == TTY_CLOSED) { + iucv_message_reject(path, msg); + goto unlock_return; + } + + /* allocate buffer an empty buffer element */ + rb = alloc_tty_buffer(0, GFP_ATOMIC); + if (!rb) { + iucv_message_reject(path, msg); + goto unlock_return; /* -ENOMEM */ + } + rb->msg = *msg; + + list_add_tail(&rb->list, &priv->tty_inqueue); + + hvc_kick(); /* wakup hvc console thread */ + +unlock_return: + spin_unlock(&priv->lock); +} + +/** + * hvc_iucv_msg_complete() - IUCV handler to process message completion + * @path: Pending path (struct iucv_path) + * @msg: Pointer to the IUCV message + * + * The function is called upon completion of message delivery and the + * message is removed from the outqueue. Additional delivery information + * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and + * purged messages (0x010000 (IPADPGNR)). + * + * Locking: struct hvc_iucv_private->lock + */ +static void hvc_iucv_msg_complete(struct iucv_path *path, + struct iucv_message *msg) +{ + struct hvc_iucv_private *priv = path->private; + struct iucv_tty_buffer *ent, *next; + LIST_HEAD(list_remove); + + spin_lock(&priv->lock); + list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) + if (ent->msg.id == msg->id) { + list_move(&ent->list, &list_remove); + break; + } + spin_unlock(&priv->lock); + destroy_tty_buffer_list(&list_remove); +} + + +/* HVC operations */ +static struct hv_ops hvc_iucv_ops = { + .get_chars = hvc_iucv_get_chars, + .put_chars = hvc_iucv_put_chars, + .notifier_add = hvc_iucv_notifier_add, + .notifier_del = hvc_iucv_notifier_del, + .notifier_hangup = hvc_iucv_notifier_hangup, +}; + +/** + * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance + * @id: hvc_iucv_table index + * + * This function allocates a new hvc_iucv_private struct and put the + * instance into hvc_iucv_table at index @id. + * Returns 0 on success; otherwise non-zero. 
+ */ +static int __init hvc_iucv_alloc(int id) +{ + struct hvc_iucv_private *priv; + char name[9]; + int rc; + + priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->lock); + INIT_LIST_HEAD(&priv->tty_outqueue); + INIT_LIST_HEAD(&priv->tty_inqueue); + + /* Finally allocate hvc */ + priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, + HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE); + if (IS_ERR(priv->hvc)) { + rc = PTR_ERR(priv->hvc); + kfree(priv); + return rc; + } + + /* setup iucv related information */ + snprintf(name, 9, "ihvc%-4d", id); + memcpy(priv->srv_name, name, 8); + ASCEBC(priv->srv_name, 8); + + hvc_iucv_table[id] = priv; + return 0; +} + +/** + * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV + */ +static int __init hvc_iucv_init(void) +{ + int rc, i; + + if (!MACHINE_IS_VM) { + pr_warning("The z/VM IUCV Hypervisor console cannot be " + "used without z/VM.\n"); + return -ENODEV; + } + + if (!hvc_iucv_devices) + return -ENODEV; + + if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) + return -EINVAL; + + hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, + sizeof(struct iucv_tty_buffer), + 0, 0, NULL); + if (!hvc_iucv_buffer_cache) { + pr_err("Not enough memory for driver initialization " + "(rs=%d).\n", 1); + return -ENOMEM; + } + + hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, + hvc_iucv_buffer_cache); + if (!hvc_iucv_mempool) { + pr_err("Not enough memory for driver initialization " + "(rs=%d).\n", 2); + kmem_cache_destroy(hvc_iucv_buffer_cache); + return -ENOMEM; + } + + /* allocate hvc_iucv_private structs */ + for (i = 0; i < hvc_iucv_devices; i++) { + rc = hvc_iucv_alloc(i); + if (rc) { + pr_err("Could not create new z/VM IUCV HVC backend " + "rc=%d.\n", rc); + goto out_error_hvc; + } + } + + /* register IUCV callback handler */ + rc = iucv_register(&hvc_iucv_handler, 0); + if (rc) { + pr_err("Could not register iucv handler (rc=%d).\n", rc); + goto out_error_iucv; + } + + return 0; + +out_error_iucv: + iucv_unregister(&hvc_iucv_handler, 0); +out_error_hvc: + for (i = 0; i < hvc_iucv_devices; i++) + if (hvc_iucv_table[i]) { + if (hvc_iucv_table[i]->hvc) + hvc_remove(hvc_iucv_table[i]->hvc); + kfree(hvc_iucv_table[i]); + } + mempool_destroy(hvc_iucv_mempool); + kmem_cache_destroy(hvc_iucv_buffer_cache); + return rc; +} + +/** + * hvc_iucv_console_init() - Early console initialization + */ +static int __init hvc_iucv_console_init(void) +{ + if (!MACHINE_IS_VM || !hvc_iucv_devices) + return -ENODEV; + return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops); +} + +/** + * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter + * @val: Parameter value (numeric) + */ +static int __init hvc_iucv_config(char *val) +{ + return strict_strtoul(val, 10, &hvc_iucv_devices); +} + + +module_init(hvc_iucv_init); +console_initcall(hvc_iucv_console_init); +__setup("hvc_iucv=", hvc_iucv_config); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HVC back-end for z/VM IUCV."); +MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>"); diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index ce0d9da52a8..94966edfb44 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -274,6 +274,22 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; +#ifdef CONFIG_TRACING +#include <linux/ftrace.h> + +static void sysrq_ftrace_dump(int key, struct tty_struct *tty) +{ + ftrace_dump(); +} +static struct sysrq_key_op sysrq_ftrace_dump_op = { + 
.handler = sysrq_ftrace_dump, + .help_msg = "dumpZ-ftrace-buffer", + .action_msg = "Dump ftrace buffer", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; +#else +#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0) +#endif static void sysrq_handle_showmem(int key, struct tty_struct *tty) { @@ -406,7 +422,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { NULL, /* x */ /* y: May be registered on sparc64 for global register dump */ NULL, /* y */ - NULL /* z */ + &sysrq_ftrace_dump_op, /* z */ }; /* key2index calculation, -1 on invalid index */ diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c index 5787249934c..34ab6d798f8 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/char/tty_audit.c @@ -67,6 +67,29 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) tty_audit_buf_free(buf); } +static void tty_audit_log(const char *description, struct task_struct *tsk, + uid_t loginuid, unsigned sessionid, int major, + int minor, unsigned char *data, size_t size) +{ + struct audit_buffer *ab; + + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); + if (ab) { + char name[sizeof(tsk->comm)]; + uid_t uid = task_uid(tsk); + + audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " + "major=%d minor=%d comm=", description, + tsk->pid, uid, loginuid, sessionid, + major, minor); + get_task_comm(name, tsk); + audit_log_untrustedstring(ab, name); + audit_log_format(ab, " data="); + audit_log_n_hex(ab, data, size); + audit_log_end(ab); + } +} + /** * tty_audit_buf_push - Push buffered data out * @@ -77,25 +100,12 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid, unsigned int sessionid, struct tty_audit_buf *buf) { - struct audit_buffer *ab; - if (buf->valid == 0) return; if (audit_enabled == 0) return; - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); - if (ab) { - char name[sizeof(tsk->comm)]; - - audit_log_format(ab, "tty pid=%u uid=%u auid=%u ses=%u " - "major=%d minor=%d comm=", tsk->pid, tsk->uid, - loginuid, sessionid, buf->major, buf->minor); - get_task_comm(name, tsk); - audit_log_untrustedstring(ab, name); - audit_log_format(ab, " data="); - audit_log_n_hex(ab, buf->data, buf->valid); - audit_log_end(ab); - } + tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, + buf->data, buf->valid); buf->valid = 0; } @@ -150,6 +160,42 @@ void tty_audit_fork(struct signal_struct *sig) } /** + * tty_audit_tiocsti - Log TIOCSTI + */ +void tty_audit_tiocsti(struct tty_struct *tty, char ch) +{ + struct tty_audit_buf *buf; + int major, minor, should_audit; + + spin_lock_irq(¤t->sighand->siglock); + should_audit = current->signal->audit_tty; + buf = current->signal->tty_audit_buf; + if (buf) + atomic_inc(&buf->count); + spin_unlock_irq(¤t->sighand->siglock); + + major = tty->driver->major; + minor = tty->driver->minor_start + tty->index; + if (buf) { + mutex_lock(&buf->mutex); + if (buf->major == major && buf->minor == minor) + tty_audit_buf_push_current(buf); + mutex_unlock(&buf->mutex); + tty_audit_buf_put(buf); + } + + if (should_audit && audit_enabled) { + uid_t auid; + unsigned int sessionid; + + auid = audit_get_loginuid(current); + sessionid = audit_get_sessionid(current); + tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, + minor, &ch, 1); + } +} + +/** * tty_audit_push_task - Flush task's pending audit data */ void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 1412a8d1e58..db15f9ba7c0 100644 --- a/drivers/char/tty_io.c +++ 
b/drivers/char/tty_io.c @@ -2018,6 +2018,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p) return -EPERM; if (get_user(ch, p)) return -EFAULT; + tty_audit_tiocsti(tty, ch); ld = tty_ldisc_ref_wait(tty); ld->ops->receive_buf(tty, &ch, &mbz, 1); tty_ldisc_deref(ld); diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index aa7f7962a9a..05d897764f0 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2003-2008 Xilinx Inc. * All rights reserved. * diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h index 8b0252bf06e..d4f419ee87a 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.h +++ b/drivers/char/xilinx_hwicap/buffer_icap.h @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2003-2008 Xilinx Inc. * All rights reserved. * diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 776b5052847..02225eb19cf 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h index 62bda453c90..4c9dd9a3b62 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.h +++ b/drivers/char/xilinx_hwicap/fifo_icap.h @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d1613194909..f40ab699860 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group * (c) Copyright 2007-2008 Xilinx Inc. diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index 24d0d9b938f..8cca11981c5 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h @@ -21,9 +21,6 @@ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. 
* - * Xilinx products are not intended for use in life support appliances, - * devices, or systems. Use in such applications is expressly prohibited. - * * (c) Copyright 2003-2007 Xilinx Inc. * All rights reserved. * diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 5c9f67f98d1..c5afc98e267 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -106,6 +106,7 @@ void proc_id_connector(struct task_struct *task, int which_id) struct proc_event *ev; __u8 buffer[CN_PROC_MSG_SIZE]; struct timespec ts; + const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; @@ -115,14 +116,19 @@ void proc_id_connector(struct task_struct *task, int which_id) ev->what = which_id; ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_tgid = task->tgid; + rcu_read_lock(); + cred = __task_cred(task); if (which_id == PROC_EVENT_UID) { - ev->event_data.id.r.ruid = task->uid; - ev->event_data.id.e.euid = task->euid; + ev->event_data.id.r.ruid = cred->uid; + ev->event_data.id.e.euid = cred->euid; } else if (which_id == PROC_EVENT_GID) { - ev->event_data.id.r.rgid = task->gid; - ev->event_data.id.e.egid = task->egid; - } else + ev->event_data.id.r.rgid = cred->gid; + ev->event_data.id.e.egid = cred->egid; + } else { + rcu_read_unlock(); return; + } + rcu_read_unlock(); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 4d22b21bd3e..0c79fe7f156 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -38,9 +38,6 @@ #include <asm/kmap_types.h> -#undef dprintk - -#define HIFN_TEST //#define HIFN_DEBUG #ifdef HIFN_DEBUG @@ -363,14 +360,14 @@ static atomic_t hifn_dev_number; #define HIFN_NAMESIZE 32 #define HIFN_MAX_RESULT_ORDER 5 -#define HIFN_D_CMD_RSIZE 24*4 -#define HIFN_D_SRC_RSIZE 80*4 -#define HIFN_D_DST_RSIZE 80*4 -#define HIFN_D_RES_RSIZE 24*4 +#define HIFN_D_CMD_RSIZE 24*1 +#define HIFN_D_SRC_RSIZE 80*1 +#define HIFN_D_DST_RSIZE 80*1 +#define HIFN_D_RES_RSIZE 24*1 #define HIFN_D_DST_DALIGN 4 -#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1 +#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1) #define AES_MIN_KEY_SIZE 16 #define AES_MAX_KEY_SIZE 32 @@ -406,8 +403,6 @@ struct hifn_dma { u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; - u64 test_src, test_dst; - /* * Our current positions for insertion and removal from the descriptor * rings. 
@@ -434,9 +429,6 @@ struct hifn_device struct pci_dev *pdev; void __iomem *bar[3]; - unsigned long result_mem; - dma_addr_t dst; - void *desc_virt; dma_addr_t desc_dma; @@ -446,8 +438,6 @@ struct hifn_device spinlock_t lock; - void *priv; - u32 flags; int active, started; struct delayed_work work; @@ -657,12 +647,17 @@ struct ablkcipher_walk struct hifn_context { - u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv; + u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; struct hifn_device *dev; - unsigned int keysize, ivsize; + unsigned int keysize; +}; + +struct hifn_request_context +{ + u8 *iv; + unsigned int ivsize; u8 op, type, mode, unused; struct ablkcipher_walk walk; - atomic_t sg_num; }; #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) @@ -1168,7 +1163,8 @@ static int hifn_setup_crypto_command(struct hifn_device *dev, } static int hifn_setup_cmd_desc(struct hifn_device *dev, - struct hifn_context *ctx, void *priv, unsigned int nbytes) + struct hifn_context *ctx, struct hifn_request_context *rctx, + void *priv, unsigned int nbytes) { struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int cmd_len, sa_idx; @@ -1179,7 +1175,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, buf_pos = buf = dma->command_bufs[dma->cmdi]; mask = 0; - switch (ctx->op) { + switch (rctx->op) { case ACRYPTO_OP_DECRYPT: mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; break; @@ -1196,15 +1192,15 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, nbytes, mask, dev->snum); - if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) { + if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) { u16 md = 0; if (ctx->keysize) md |= HIFN_CRYPT_CMD_NEW_KEY; - if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB) + if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB) md |= HIFN_CRYPT_CMD_NEW_IV; - switch (ctx->mode) { + switch (rctx->mode) { case ACRYPTO_MODE_ECB: md |= HIFN_CRYPT_CMD_MODE_ECB; break; @@ -1221,7 +1217,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, goto err_out; } - switch (ctx->type) { + switch (rctx->type) { case ACRYPTO_TYPE_AES_128: if (ctx->keysize != 16) goto err_out; @@ -1256,17 +1252,18 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, buf_pos += hifn_setup_crypto_command(dev, buf_pos, nbytes, nbytes, ctx->key, ctx->keysize, - ctx->iv, ctx->ivsize, md); + rctx->iv, rctx->ivsize, md); } dev->sa[sa_idx] = priv; + dev->started++; cmd_len = buf_pos - buf; dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); if (++dma->cmdi == HIFN_D_CMD_RSIZE) { - dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND | + dma->cmdr[dma->cmdi].l = __cpu_to_le32( HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); dma->cmdi = 0; @@ -1284,7 +1281,7 @@ err_out: } static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, - unsigned int offset, unsigned int size) + unsigned int offset, unsigned int size, int last) { struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int idx; @@ -1296,12 +1293,12 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, dma->srcr[idx].p = __cpu_to_le32(addr); dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | - HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | - HIFN_D_JUMP | - HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | + (last ? 
HIFN_D_LAST : 0)); idx = 0; } @@ -1342,7 +1339,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev) } static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, - unsigned offset, unsigned size) + unsigned offset, unsigned size, int last) { struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int idx; @@ -1353,12 +1350,12 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, idx = dma->dsti; dma->dstr[idx].p = __cpu_to_le32(addr); dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | - HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | - HIFN_D_LAST); + (last ? HIFN_D_LAST : 0)); idx = 0; } dma->dsti = idx; @@ -1370,16 +1367,52 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, } } -static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff, - struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv, - struct hifn_context *ctx) +static int hifn_setup_dma(struct hifn_device *dev, + struct hifn_context *ctx, struct hifn_request_context *rctx, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *priv) { - dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n", - dev->name, spage, soff, dpage, doff, nbytes, priv, ctx); + struct scatterlist *t; + struct page *spage, *dpage; + unsigned int soff, doff; + unsigned int n, len; - hifn_setup_src_desc(dev, spage, soff, nbytes); - hifn_setup_cmd_desc(dev, ctx, priv, nbytes); - hifn_setup_dst_desc(dev, dpage, doff, nbytes); + n = nbytes; + while (n) { + spage = sg_page(src); + soff = src->offset; + len = min(src->length, n); + + hifn_setup_src_desc(dev, spage, soff, len, n - len == 0); + + src++; + n -= len; + } + + t = &rctx->walk.cache[0]; + n = nbytes; + while (n) { + if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + BUG_ON(!sg_page(t)); + dpage = sg_page(t); + doff = 0; + len = t->length; + } else { + BUG_ON(!sg_page(dst)); + dpage = sg_page(dst); + doff = dst->offset; + len = dst->length; + } + len = min(len, n); + + hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0); + + dst++; + t++; + n -= len; + } + + hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes); hifn_setup_res_desc(dev); return 0; } @@ -1424,32 +1457,26 @@ static void ablkcipher_walk_exit(struct ablkcipher_walk *w) w->num = 0; } -static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src, +static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, unsigned int size, unsigned int *nbytesp) { unsigned int copy, drest = *drestp, nbytes = *nbytesp; int idx = 0; - void *saddr; if (drest < size || size > nbytes) return -EINVAL; while (size) { - copy = min(drest, min(size, src->length)); - - saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1); - memcpy(daddr, saddr + src->offset, copy); - kunmap_atomic(saddr, KM_SOFTIRQ1); + copy = min(drest, min(size, dst->length)); size -= copy; drest -= copy; nbytes -= copy; - daddr += copy; dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", __func__, copy, size, drest, nbytes); - src++; + dst++; idx++; } @@ -1462,8 +1489,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist static int ablkcipher_walk(struct ablkcipher_request *req, struct ablkcipher_walk *w) { - struct scatterlist *src, *dst, *t; - void *daddr; + struct 
scatterlist *dst, *t; unsigned int nbytes = req->nbytes, offset, copy, diff; int idx, tidx, err; @@ -1473,26 +1499,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req, if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) return -EINVAL; - src = &req->src[idx]; dst = &req->dst[idx]; - dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, " - "nbytes: %u.\n", - __func__, src->length, dst->length, src->offset, - dst->offset, offset, nbytes); + dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", + __func__, dst->length, dst->offset, offset, nbytes); if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || offset) { - unsigned slen = min(src->length - offset, nbytes); + unsigned slen = min(dst->length - offset, nbytes); unsigned dlen = PAGE_SIZE; t = &w->cache[idx]; - daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); - err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes); + err = ablkcipher_add(&dlen, dst, slen, &nbytes); if (err < 0) - goto err_out_unmap; + return err; idx += err; @@ -1528,21 +1550,19 @@ static int ablkcipher_walk(struct ablkcipher_request *req, } else { copy += diff + nbytes; - src = &req->src[idx]; + dst = &req->dst[idx]; - err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes); + err = ablkcipher_add(&dlen, dst, nbytes, &nbytes); if (err < 0) - goto err_out_unmap; + return err; idx += err; } t->length = copy; t->offset = offset; - - kunmap_atomic(daddr, KM_SOFTIRQ0); } else { - nbytes -= min(src->length, nbytes); + nbytes -= min(dst->length, nbytes); idx++; } @@ -1550,26 +1570,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req, } return tidx; - -err_out_unmap: - kunmap_atomic(daddr, KM_SOFTIRQ0); - return err; } static int hifn_setup_session(struct ablkcipher_request *req) { struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); struct hifn_device *dev = ctx->dev; - struct page *spage, *dpage; - unsigned long soff, doff, dlen, flags; - unsigned int nbytes = req->nbytes, idx = 0, len; + unsigned long dlen, flags; + unsigned int nbytes = req->nbytes, idx = 0; int err = -EINVAL, sg_num; - struct scatterlist *src, *dst, *t; + struct scatterlist *dst; - if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB) + if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB) goto err_out_exit; - ctx->walk.flags = 0; + rctx->walk.flags = 0; while (nbytes) { dst = &req->dst[idx]; @@ -1577,27 +1593,23 @@ static int hifn_setup_session(struct ablkcipher_request *req) if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN)) - ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; + rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; nbytes -= dlen; idx++; } - if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { - err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC); + if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); if (err < 0) return err; } - nbytes = req->nbytes; - idx = 0; - - sg_num = ablkcipher_walk(req, &ctx->walk); + sg_num = ablkcipher_walk(req, &rctx->walk); if (sg_num < 0) { err = sg_num; goto err_out_exit; } - atomic_set(&ctx->sg_num, sg_num); spin_lock_irqsave(&dev->lock, flags); if (dev->started + sg_num > HIFN_QUEUE_LENGTH) { @@ -1605,37 +1617,11 @@ static int hifn_setup_session(struct ablkcipher_request *req) goto err_out; } - dev->snum++; - dev->started += sg_num; - - while (nbytes) { - src = &req->src[idx]; - dst = 
&req->dst[idx]; - t = &ctx->walk.cache[idx]; - - if (t->length) { - spage = dpage = sg_page(t); - soff = doff = 0; - len = t->length; - } else { - spage = sg_page(src); - soff = src->offset; - - dpage = sg_page(dst); - doff = dst->offset; - - len = dst->length; - } - - idx++; - - err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes, - req, ctx); - if (err) - goto err_out; + err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req); + if (err) + goto err_out; - nbytes -= min(len, nbytes); - } + dev->snum++; dev->active = HIFN_DEFAULT_ACTIVE_NUM; spin_unlock_irqrestore(&dev->lock, flags); @@ -1645,12 +1631,13 @@ static int hifn_setup_session(struct ablkcipher_request *req) err_out: spin_unlock_irqrestore(&dev->lock, flags); err_out_exit: - if (err) - dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " + if (err) { + printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " "type: %u, err: %d.\n", - dev->name, ctx->iv, ctx->ivsize, + dev->name, rctx->iv, rctx->ivsize, ctx->key, ctx->keysize, - ctx->mode, ctx->op, ctx->type, err); + rctx->mode, rctx->op, rctx->type, err); + } return err; } @@ -1660,31 +1647,33 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) int n, err; u8 src[16]; struct hifn_context ctx; + struct hifn_request_context rctx; u8 fips_aes_ecb_from_zero[16] = { 0x66, 0xE9, 0x4B, 0xD4, 0xEF, 0x8A, 0x2C, 0x3B, 0x88, 0x4C, 0xFA, 0x59, 0xCA, 0x34, 0x2B, 0x2E}; + struct scatterlist sg; memset(src, 0, sizeof(src)); memset(ctx.key, 0, sizeof(ctx.key)); ctx.dev = dev; ctx.keysize = 16; - ctx.ivsize = 0; - ctx.iv = NULL; - ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; - ctx.mode = ACRYPTO_MODE_ECB; - ctx.type = ACRYPTO_TYPE_AES_128; - atomic_set(&ctx.sg_num, 1); - - err = hifn_setup_dma(dev, - virt_to_page(src), offset_in_page(src), - virt_to_page(src), offset_in_page(src), - sizeof(src), NULL, &ctx); + rctx.ivsize = 0; + rctx.iv = NULL; + rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; + rctx.mode = ACRYPTO_MODE_ECB; + rctx.type = ACRYPTO_TYPE_AES_128; + rctx.walk.cache[0].length = 0; + + sg_init_one(&sg, &src, sizeof(src)); + + err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); if (err) goto err_out; + dev->started = 0; msleep(200); dprintk("%s: decoded: ", dev->name); @@ -1711,6 +1700,7 @@ static int hifn_start_device(struct hifn_device *dev) { int err; + dev->started = dev->active = 0; hifn_reset_dma(dev, 1); err = hifn_enable_crypto(dev); @@ -1764,90 +1754,65 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset return idx; } -static void hifn_process_ready(struct ablkcipher_request *req, int error) +static inline void hifn_complete_sa(struct hifn_device *dev, int i) { - struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); - struct hifn_device *dev; - - dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx); + unsigned long flags; - dev = ctx->dev; - dprintk("%s: req: %p, started: %d, sg_num: %d.\n", - __func__, req, dev->started, atomic_read(&ctx->sg_num)); + spin_lock_irqsave(&dev->lock, flags); + dev->sa[i] = NULL; + dev->started--; + if (dev->started < 0) + printk("%s: started: %d.\n", __func__, dev->started); + spin_unlock_irqrestore(&dev->lock, flags); + BUG_ON(dev->started < 0); +} - if (--dev->started < 0) - BUG(); +static void hifn_process_ready(struct ablkcipher_request *req, int error) +{ + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); - if (atomic_dec_and_test(&ctx->sg_num)) { + if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { 
unsigned int nbytes = req->nbytes; int idx = 0, err; struct scatterlist *dst, *t; void *saddr; - if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { - while (nbytes) { - t = &ctx->walk.cache[idx]; - dst = &req->dst[idx]; - - dprintk("\n%s: sg_page(t): %p, t->length: %u, " - "sg_page(dst): %p, dst->length: %u, " - "nbytes: %u.\n", - __func__, sg_page(t), t->length, - sg_page(dst), dst->length, nbytes); + while (nbytes) { + t = &rctx->walk.cache[idx]; + dst = &req->dst[idx]; - if (!t->length) { - nbytes -= min(dst->length, nbytes); - idx++; - continue; - } + dprintk("\n%s: sg_page(t): %p, t->length: %u, " + "sg_page(dst): %p, dst->length: %u, " + "nbytes: %u.\n", + __func__, sg_page(t), t->length, + sg_page(dst), dst->length, nbytes); - saddr = kmap_atomic(sg_page(t), KM_IRQ1); + if (!t->length) { + nbytes -= min(dst->length, nbytes); + idx++; + continue; + } - err = ablkcipher_get(saddr, &t->length, t->offset, - dst, nbytes, &nbytes); - if (err < 0) { - kunmap_atomic(saddr, KM_IRQ1); - break; - } + saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); - idx += err; - kunmap_atomic(saddr, KM_IRQ1); + err = ablkcipher_get(saddr, &t->length, t->offset, + dst, nbytes, &nbytes); + if (err < 0) { + kunmap_atomic(saddr, KM_SOFTIRQ0); + break; } - ablkcipher_walk_exit(&ctx->walk); + idx += err; + kunmap_atomic(saddr, KM_SOFTIRQ0); } - req->base.complete(&req->base, error); + ablkcipher_walk_exit(&rctx->walk); } -} -static void hifn_check_for_completion(struct hifn_device *dev, int error) -{ - int i; - struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; - - for (i=0; i<HIFN_D_RES_RSIZE; ++i) { - struct hifn_desc *d = &dma->resr[i]; - - if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) { - dev->success++; - dev->reset = 0; - hifn_process_ready(dev->sa[i], error); - dev->sa[i] = NULL; - } - - if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER)) - if (printk_ratelimit()) - printk("%s: overflow detected [d: %u, o: %u] " - "at %d resr: l: %08x, p: %08x.\n", - dev->name, - !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)), - !!(d->l & __cpu_to_le32(HIFN_D_OVER)), - i, d->l, d->p); - } + req->base.complete(&req->base, error); } -static void hifn_clear_rings(struct hifn_device *dev) +static void hifn_clear_rings(struct hifn_device *dev, int error) { struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i, u; @@ -1864,21 +1829,26 @@ static void hifn_clear_rings(struct hifn_device *dev) if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) break; - if (i != HIFN_D_RES_RSIZE) - u--; + if (dev->sa[i]) { + dev->success++; + dev->reset = 0; + hifn_process_ready(dev->sa[i], error); + hifn_complete_sa(dev, i); + } - if (++i == (HIFN_D_RES_RSIZE + 1)) + if (++i == HIFN_D_RES_RSIZE) i = 0; + u--; } dma->resk = i; dma->resu = u; i = dma->srck; u = dma->srcu; while (u != 0) { - if (i == HIFN_D_SRC_RSIZE) - i = 0; if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) break; - i++, u--; + if (++i == HIFN_D_SRC_RSIZE) + i = 0; + u--; } dma->srck = i; dma->srcu = u; @@ -1886,20 +1856,19 @@ static void hifn_clear_rings(struct hifn_device *dev) while (u != 0) { if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) break; - if (i != HIFN_D_CMD_RSIZE) - u--; - if (++i == (HIFN_D_CMD_RSIZE + 1)) + if (++i == HIFN_D_CMD_RSIZE) i = 0; + u--; } dma->cmdk = i; dma->cmdu = u; i = dma->dstk; u = dma->dstu; while (u != 0) { - if (i == HIFN_D_DST_RSIZE) - i = 0; if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) break; - i++, u--; + if (++i == HIFN_D_DST_RSIZE) + i = 0; + u--; } dma->dstk = i; dma->dstu = u; @@ -1944,30 +1913,39 @@ static void 
hifn_work(struct work_struct *work) } else dev->active--; - if (dev->prev_success == dev->success && dev->started) + if ((dev->prev_success == dev->success) && dev->started) reset = 1; dev->prev_success = dev->success; spin_unlock_irqrestore(&dev->lock, flags); if (reset) { - dprintk("%s: r: %08x, active: %d, started: %d, " - "success: %lu: reset: %d.\n", - dev->name, r, dev->active, dev->started, - dev->success, reset); - if (++dev->reset >= 5) { - dprintk("%s: really hard reset.\n", dev->name); + int i; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + printk("%s: r: %08x, active: %d, started: %d, " + "success: %lu: qlen: %u/%u, reset: %d.\n", + dev->name, r, dev->active, dev->started, + dev->success, dev->queue.qlen, dev->queue.max_qlen, + reset); + + printk("%s: res: ", __func__); + for (i=0; i<HIFN_D_RES_RSIZE; ++i) { + printk("%x.%p ", dma->resr[i].l, dev->sa[i]); + if (dev->sa[i]) { + hifn_process_ready(dev->sa[i], -ENODEV); + hifn_complete_sa(dev, i); + } + } + printk("\n"); + hifn_reset_dma(dev, 1); hifn_stop_device(dev); hifn_start_device(dev); dev->reset = 0; } - spin_lock_irqsave(&dev->lock, flags); - hifn_check_for_completion(dev, -EBUSY); - hifn_clear_rings(dev); - dev->started = 0; - spin_unlock_irqrestore(&dev->lock, flags); + tasklet_schedule(&dev->tasklet); } schedule_delayed_work(&dev->work, HZ); @@ -1984,8 +1962,8 @@ static irqreturn_t hifn_interrupt(int irq, void *data) dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, - dma->cmdu, dma->srcu, dma->dstu, dma->resu, - dma->cmdi, dma->srci, dma->dsti, dma->resi); + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu); if ((dmacsr & dev->dmareg) == 0) return IRQ_NONE; @@ -2002,11 +1980,10 @@ static irqreturn_t hifn_interrupt(int irq, void *data) if (restart) { u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); - if (printk_ratelimit()) - printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), - !!(dmacsr & HIFN_DMACSR_D_OVER), - puisr, !!(puisr & HIFN_PUISR_DSTOVER)); + printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), + !!(dmacsr & HIFN_DMACSR_D_OVER), + puisr, !!(puisr & HIFN_PUISR_DSTOVER)); if (!!(puisr & HIFN_PUISR_DSTOVER)) hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER | @@ -2016,12 +1993,11 @@ static irqreturn_t hifn_interrupt(int irq, void *data) restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { - if (printk_ratelimit()) - printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), - !!(dmacsr & HIFN_DMACSR_S_ABORT), - !!(dmacsr & HIFN_DMACSR_D_ABORT), - !!(dmacsr & HIFN_DMACSR_R_ABORT)); + printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), + !!(dmacsr & HIFN_DMACSR_S_ABORT), + !!(dmacsr & HIFN_DMACSR_D_ABORT), + !!(dmacsr & HIFN_DMACSR_R_ABORT)); hifn_reset_dma(dev, 1); hifn_init_dma(dev); hifn_init_registers(dev); @@ -2034,7 +2010,6 @@ static irqreturn_t hifn_interrupt(int irq, void *data) } tasklet_schedule(&dev->tasklet); - hifn_clear_rings(dev); return IRQ_HANDLED; } @@ -2048,21 +2023,25 @@ static void hifn_flush(struct hifn_device *dev) struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i; - 
spin_lock_irqsave(&dev->lock, flags); for (i=0; i<HIFN_D_RES_RSIZE; ++i) { struct hifn_desc *d = &dma->resr[i]; if (dev->sa[i]) { hifn_process_ready(dev->sa[i], (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); + hifn_complete_sa(dev, i); } } + spin_lock_irqsave(&dev->lock, flags); while ((async_req = crypto_dequeue_request(&dev->queue))) { ctx = crypto_tfm_ctx(async_req->tfm); req = container_of(async_req, struct ablkcipher_request, base); + spin_unlock_irqrestore(&dev->lock, flags); hifn_process_ready(req, -ENODEV); + + spin_lock_irqsave(&dev->lock, flags); } spin_unlock_irqrestore(&dev->lock, flags); } @@ -2121,6 +2100,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, u8 type, u8 mode) { struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_request_context *rctx = ablkcipher_request_ctx(req); unsigned ivsize; ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); @@ -2141,11 +2121,11 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, type = ACRYPTO_TYPE_AES_256; } - ctx->op = op; - ctx->mode = mode; - ctx->type = type; - ctx->iv = req->info; - ctx->ivsize = ivsize; + rctx->op = op; + rctx->mode = mode; + rctx->type = type; + rctx->iv = req->info; + rctx->ivsize = ivsize; /* * HEAVY TODO: needs to kick Herbert XU to write documentation. @@ -2158,7 +2138,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, static int hifn_process_queue(struct hifn_device *dev) { - struct crypto_async_request *async_req; + struct crypto_async_request *async_req, *backlog; struct hifn_context *ctx; struct ablkcipher_request *req; unsigned long flags; @@ -2166,12 +2146,16 @@ static int hifn_process_queue(struct hifn_device *dev) while (dev->started < HIFN_QUEUE_LENGTH) { spin_lock_irqsave(&dev->lock, flags); + backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); spin_unlock_irqrestore(&dev->lock, flags); if (!async_req) break; + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + ctx = crypto_tfm_ctx(async_req->tfm); req = container_of(async_req, struct ablkcipher_request, base); @@ -2496,7 +2480,7 @@ static int hifn_cra_init(struct crypto_tfm *tfm) struct hifn_context *ctx = crypto_tfm_ctx(tfm); ctx->dev = ha->dev; - + tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context); return 0; } @@ -2574,7 +2558,10 @@ static void hifn_tasklet_callback(unsigned long data) * (like dev->success), but they are used in process * context or update is atomic (like setting dev->sa[i] to NULL). 
*/ - hifn_check_for_completion(dev, 0); + hifn_clear_rings(dev, 0); + + if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) + hifn_process_queue(dev); } static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -2631,22 +2618,11 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_out_unmap_bars; } - dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER); - if (!dev->result_mem) { - dprintk("Failed to allocate %d pages for result_mem.\n", - HIFN_MAX_RESULT_ORDER); - goto err_out_unmap_bars; - } - memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER)); - - dev->dst = pci_map_single(pdev, (void *)dev->result_mem, - PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE); - dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), &dev->desc_dma); if (!dev->desc_virt) { dprintk("Failed to allocate descriptor rings.\n"); - goto err_out_free_result_pages; + goto err_out_unmap_bars; } memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); @@ -2706,11 +2682,6 @@ err_out_free_desc: pci_free_consistent(pdev, sizeof(struct hifn_dma), dev->desc_virt, dev->desc_dma); -err_out_free_result_pages: - pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER, - PCI_DMA_FROMDEVICE); - free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); - err_out_unmap_bars: for (i=0; i<3; ++i) if (dev->bar[i]) @@ -2748,10 +2719,6 @@ static void hifn_remove(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct hifn_dma), dev->desc_virt, dev->desc_dma); - pci_unmap_single(pdev, dev->dst, - PAGE_SIZE << HIFN_MAX_RESULT_ORDER, - PCI_DMA_FROMDEVICE); - free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); for (i=0; i<3; ++i) if (dev->bar[i]) iounmap(dev->bar[i]); @@ -2782,6 +2749,11 @@ static int __devinit hifn_init(void) unsigned int freq; int err; + if (sizeof(dma_addr_t) > 4) { + printk(KERN_INFO "HIFN supports only 32-bit addresses.\n"); + return -EINVAL; + } + if (strncmp(hifn_pll_ref, "ext", 3) && strncmp(hifn_pll_ref, "pci", 3)) { printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index bf2917d197a..856b3cc2558 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -15,6 +15,8 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/percpu.h> +#include <linux/smp.h> #include <asm/byteorder.h> #include <asm/i387.h> #include "padlock.h" @@ -49,6 +51,8 @@ struct aes_ctx { u32 *D; }; +static DEFINE_PER_CPU(struct cword *, last_cword); + /* Tells whether the ACE is capable to generate the extended key for a given key_len. */ static inline int @@ -89,6 +93,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, const __le32 *key = (const __le32 *)in_key; u32 *flags = &tfm->crt_flags; struct crypto_aes_ctx gen_aes; + int cpu; if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; @@ -118,7 +123,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* Don't generate extended keys if the hardware can do it. 
*/ if (aes_hw_extkey_available(key_len)) - return 0; + goto ok; ctx->D = ctx->d_data; ctx->cword.encrypt.keygen = 1; @@ -131,15 +136,30 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(last_cword, cpu)) + per_cpu(last_cword, cpu) = NULL; + return 0; } /* ====== Encryption/decryption routines ====== */ /* These are the real call to PadLock. */ -static inline void padlock_reset_key(void) +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(last_cword, cpu)) + asm volatile ("pushfl; popfl"); +} + +static inline void padlock_store_cword(struct cword *cword) { - asm volatile ("pushfl; popfl"); + per_cpu(last_cword, raw_smp_processor_id()) = cword; } /* @@ -149,7 +169,7 @@ static inline void padlock_reset_key(void) */ static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, - void *control_word) + struct cword *control_word) { asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) @@ -213,22 +233,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.encrypt); ts_state = irq_ts_save(); aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.encrypt); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.encrypt); ts_state = irq_ts_save(); aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.encrypt); } static struct crypto_alg aes_alg = { @@ -261,7 +283,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, int err; int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.encrypt); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -276,6 +298,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, } irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.encrypt); + return err; } @@ -288,7 +312,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, int err; int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.decrypt); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -302,6 +326,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.encrypt); + return err; } @@ -336,7 +363,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, int err; int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.encrypt); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -353,6 +380,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, } irq_ts_restore(ts_state); + padlock_store_cword(&ctx->cword.decrypt); + return err; } @@ -365,7 +394,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, int err; int ts_state; - padlock_reset_key(); + padlock_reset_key(&ctx->cword.encrypt); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -380,6 +409,9 @@ static 
int cbc_aes_decrypt(struct blkcipher_desc *desc, } irq_ts_restore(ts_state); + + padlock_store_cword(&ctx->cword.encrypt); + return err; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 24607669a52..a3918c16b3d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -127,7 +127,6 @@ struct talitos_private { /* request callback tasklet */ struct tasklet_struct done_task; - struct tasklet_struct error_task; /* list of registered algorithms */ struct list_head alg_list; @@ -138,6 +137,7 @@ struct talitos_private { /* .features flag */ #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 +#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 /* * map virtual single (contiguous) pointer to h/w descriptor pointer @@ -184,6 +184,11 @@ static int reset_channel(struct device *dev, int ch) setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); + /* and ICCR writeback, if available */ + if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) + setbits32(priv->reg + TALITOS_CCCR_LO(ch), + TALITOS_CCCR_LO_IWSE); + return 0; } @@ -239,6 +244,11 @@ static int init_device(struct device *dev) setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); + /* disable integrity check error interrupts (use writeback instead) */ + if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) + setbits32(priv->reg + TALITOS_MDEUICR_LO, + TALITOS_MDEUICR_LO_ICE); + return 0; } @@ -370,6 +380,12 @@ static void talitos_done(unsigned long data) for (ch = 0; ch < priv->num_channels; ch++) flush_channel(dev, ch, 0, 0); + + /* At this point, all completed channels have been processed. + * Unmask done interrupts for channels completed later on. + */ + setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); } /* @@ -469,16 +485,13 @@ static void report_eu_error(struct device *dev, int ch, struct talitos_desc *des /* * recover from error interrupts */ -static void talitos_error(unsigned long data) +static void talitos_error(unsigned long data, u32 isr, u32 isr_lo) { struct device *dev = (struct device *)data; struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; int ch, error, reset_dev = 0, reset_ch = 0; - u32 isr, isr_lo, v, v_lo; - - isr = in_be32(priv->reg + TALITOS_ISR); - isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); + u32 v, v_lo; for (ch = 0; ch < priv->num_channels; ch++) { /* skip channels without errors */ @@ -560,16 +573,19 @@ static irqreturn_t talitos_interrupt(int irq, void *data) isr = in_be32(priv->reg + TALITOS_ISR); isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); - - /* ack */ + /* Acknowledge interrupt */ out_be32(priv->reg + TALITOS_ICR, isr); out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo)) - talitos_error((unsigned long)data); + talitos_error((unsigned long)data, isr, isr_lo); else - if (likely(isr & TALITOS_ISR_CHDONE)) + if (likely(isr & TALITOS_ISR_CHDONE)) { + /* mask further done interrupts. */ + clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE); + /* done_task will unmask done interrupts at exit */ tasklet_schedule(&priv->done_task); + } return (isr || isr_lo) ? 
IRQ_HANDLED : IRQ_NONE; } @@ -802,7 +818,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, aead_request_complete(areq, err); } -static void ipsec_esp_decrypt_done(struct device *dev, +static void ipsec_esp_decrypt_swauth_done(struct device *dev, struct talitos_desc *desc, void *context, int err) { @@ -834,6 +850,27 @@ static void ipsec_esp_decrypt_done(struct device *dev, aead_request_complete(req, err); } +static void ipsec_esp_decrypt_hwauth_done(struct device *dev, + struct talitos_desc *desc, void *context, + int err) +{ + struct aead_request *req = context; + struct ipsec_esp_edesc *edesc = + container_of(desc, struct ipsec_esp_edesc, desc); + + ipsec_esp_unmap(dev, edesc, req); + + /* check ICV auth status */ + if (!err) + if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != + DESC_HDR_LO_ICCR1_PASS) + err = -EBADMSG; + + kfree(edesc); + + aead_request_complete(req, err); +} + /* * convert scatterlist to SEC h/w link table format * stop at cryptlen bytes @@ -887,6 +924,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, unsigned int authsize = ctx->authsize; unsigned int ivsize; int sg_count, ret; + int sg_link_tbl_len; /* hmac key */ map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, @@ -924,33 +962,19 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, if (sg_count == 1) { desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); } else { - sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, + sg_link_tbl_len = cryptlen; + + if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && + (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { + sg_link_tbl_len = cryptlen + authsize; + } + sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, &edesc->link_tbl[0]); if (sg_count > 1) { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[sg_count-1]; - struct scatterlist *sg; - struct talitos_private *priv = dev_get_drvdata(dev); - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); - /* If necessary for this SEC revision, - * add a link table entry for ICV. - */ - if ((priv->features & - TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) && - (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { - link_tbl_ptr->j_extent = 0; - link_tbl_ptr++; - link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; - link_tbl_ptr->len = cpu_to_be16(authsize); - sg = sg_last(areq->src, edesc->src_nents ? 
: 1); - link_tbl_ptr->ptr = cpu_to_be32( - (char *)sg_dma_address(sg) - + sg->length - authsize); - } } else { /* Only one segment now, so no link tbl needed */ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); @@ -975,13 +999,9 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) edesc->dma_link_tbl + edesc->src_nents + 1); - if (areq->src == areq->dst) { - memcpy(link_tbl_ptr, &edesc->link_tbl[0], - edesc->src_nents * sizeof(struct talitos_ptr)); - } else { - sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, - link_tbl_ptr); - } + sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, + link_tbl_ptr); + /* Add an entry to the link table for ICV data */ link_tbl_ptr += sg_count - 1; link_tbl_ptr->j_extent = 0; @@ -1106,11 +1126,14 @@ static int aead_authenc_encrypt(struct aead_request *req) return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); } + + static int aead_authenc_decrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); unsigned int authsize = ctx->authsize; + struct talitos_private *priv = dev_get_drvdata(ctx->dev); struct ipsec_esp_edesc *edesc; struct scatterlist *sg; void *icvdata; @@ -1122,22 +1145,39 @@ static int aead_authenc_decrypt(struct aead_request *req) if (IS_ERR(edesc)) return PTR_ERR(edesc); - /* stash incoming ICV for later cmp with ICV generated by the h/w */ - if (edesc->dma_len) - icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; - else - icvdata = &edesc->link_tbl[0]; + if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && + (((!edesc->src_nents && !edesc->dst_nents) || + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { + + /* decrypt and check the ICV */ + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | + DESC_HDR_MODE1_MDEU_CICV; + + /* reset integrity check result bits */ + edesc->desc.hdr_lo = 0; + + return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); + + } else { + + /* Have to check the ICV with software */ - sg = sg_last(req->src, edesc->src_nents ? : 1); + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + + /* stash incoming ICV for later cmp with ICV generated by the h/w */ + if (edesc->dma_len) + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2]; + else + icvdata = &edesc->link_tbl[0]; - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, - ctx->authsize); + sg = sg_last(req->src, edesc->src_nents ? 
: 1); - /* decrypt */ - edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; + memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, + ctx->authsize); - return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done); + return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); + } } static int aead_authenc_givencrypt( @@ -1391,7 +1431,6 @@ static int talitos_remove(struct of_device *ofdev) } tasklet_kill(&priv->done_task); - tasklet_kill(&priv->error_task); iounmap(priv->reg); @@ -1451,10 +1490,9 @@ static int talitos_probe(struct of_device *ofdev, priv->ofdev = ofdev; - INIT_LIST_HEAD(&priv->alg_list); - tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev); - tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev); + + INIT_LIST_HEAD(&priv->alg_list); priv->irq = irq_of_parse_and_map(np, 0); @@ -1508,6 +1546,9 @@ static int talitos_probe(struct of_device *ofdev, if (of_device_is_compatible(np, "fsl,sec3.0")) priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; + if (of_device_is_compatible(np, "fsl,sec2.1")) + priv->features |= TALITOS_FTR_HW_AUTH_CHECK; + priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, GFP_KERNEL); priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, @@ -1551,7 +1592,7 @@ static int talitos_probe(struct of_device *ofdev, goto err_out; } for (i = 0; i < priv->num_channels; i++) - atomic_set(&priv->submit_count[i], -priv->chfifo_len); + atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index c48a405abf7..575981f0cfd 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -37,7 +37,8 @@ #define TALITOS_MCR_LO 0x1038 #define TALITOS_MCR_SWR 0x1 /* s/w reset */ #define TALITOS_IMR 0x1008 /* interrupt mask register */ -#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */ +#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ +#define TALITOS_IMR_DONE 0x00055 /* done IRQs */ #define TALITOS_IMR_LO 0x100C #define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ #define TALITOS_ISR 0x1010 /* interrupt status register */ @@ -55,6 +56,7 @@ #define TALITOS_CCCR_CONT 0x2 /* channel continue */ #define TALITOS_CCCR_RESET 0x1 /* channel reset */ #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) +#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. 
*/ #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ @@ -102,6 +104,9 @@ #define TALITOS_AESUISR_LO 0x4034 #define TALITOS_MDEUISR 0x6030 /* message digest unit */ #define TALITOS_MDEUISR_LO 0x6034 +#define TALITOS_MDEUICR 0x6038 /* interrupt control */ +#define TALITOS_MDEUICR_LO 0x603c +#define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ #define TALITOS_AFEUISR 0x8030 /* arc4 unit */ #define TALITOS_AFEUISR_LO 0x8034 #define TALITOS_RNGUISR 0xa030 /* random number unit */ @@ -129,31 +134,34 @@ */ /* written back when done */ -#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000) +#define DESC_HDR_DONE cpu_to_be32(0xff000000) +#define DESC_HDR_LO_ICCR1_MASK cpu_to_be32(0x00180000) +#define DESC_HDR_LO_ICCR1_PASS cpu_to_be32(0x00080000) +#define DESC_HDR_LO_ICCR1_FAIL cpu_to_be32(0x00100000) /* primary execution unit select */ -#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000) -#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000) -#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000) -#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000) -#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000) -#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000) -#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000) -#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000) -#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000) -#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000) +#define DESC_HDR_SEL0_MASK cpu_to_be32(0xf0000000) +#define DESC_HDR_SEL0_AFEU cpu_to_be32(0x10000000) +#define DESC_HDR_SEL0_DEU cpu_to_be32(0x20000000) +#define DESC_HDR_SEL0_MDEUA cpu_to_be32(0x30000000) +#define DESC_HDR_SEL0_MDEUB cpu_to_be32(0xb0000000) +#define DESC_HDR_SEL0_RNG cpu_to_be32(0x40000000) +#define DESC_HDR_SEL0_PKEU cpu_to_be32(0x50000000) +#define DESC_HDR_SEL0_AESU cpu_to_be32(0x60000000) +#define DESC_HDR_SEL0_KEU cpu_to_be32(0x70000000) +#define DESC_HDR_SEL0_CRCU cpu_to_be32(0x80000000) /* primary execution unit mode (MODE0) and derivatives */ -#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000) -#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000) -#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000) -#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000) -#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000) -#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000) -#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000) -#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000) -#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000) -#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000) +#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) +#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) +#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) +#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) +#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ DESC_HDR_MODE0_MDEU_HMAC) #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | 
\ @@ -162,18 +170,19 @@ DESC_HDR_MODE0_MDEU_HMAC) /* secondary execution unit select (SEL1) */ -#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000) -#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000) -#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000) -#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000) +#define DESC_HDR_SEL1_MASK cpu_to_be32(0x000f0000) +#define DESC_HDR_SEL1_MDEUA cpu_to_be32(0x00030000) +#define DESC_HDR_SEL1_MDEUB cpu_to_be32(0x000b0000) +#define DESC_HDR_SEL1_CRCU cpu_to_be32(0x00080000) /* secondary execution unit mode (MODE1) and derivatives */ -#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000) -#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800) -#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400) -#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200) -#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100) -#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000) +#define DESC_HDR_MODE1_MDEU_CICV cpu_to_be32(0x00004000) +#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) +#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) +#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) +#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) +#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) +#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ DESC_HDR_MODE1_MDEU_HMAC) #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ @@ -182,16 +191,16 @@ DESC_HDR_MODE1_MDEU_HMAC) /* direction of overall data flow (DIR) */ -#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002) +#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) /* request done notification (DN) */ -#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001) +#define DESC_HDR_DONE_NOTIFY cpu_to_be32(0x00000001) /* descriptor types */ -#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3) -#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3) -#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3) -#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3) +#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP cpu_to_be32(0 << 3) +#define DESC_HDR_TYPE_IPSEC_ESP cpu_to_be32(1 << 3) +#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU cpu_to_be32(2 << 3) +#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU cpu_to_be32(4 << 3) /* link table extent field bits */ #define DESC_PTR_LNKTBL_JUMP 0x80 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 5317e08221e..65799651737 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device) init_completion(&device->done); kref_init(&device->refcount); + + mutex_lock(&dma_list_mutex); device->dev_id = id++; + mutex_unlock(&dma_list_mutex); /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index ecd743f7cc6..6607fdd00b1 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) */ #define IOAT_TEST_SIZE 2000 +DECLARE_COMPLETION(test_completion); static void ioat_dma_test_callback(void *dma_async_param) { printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", dma_async_param); + complete(&test_completion); } /** @@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) goto free_resources; } device->common.device_issue_pending(dma_chan); - msleep(1); + + wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c7a9306d951..6be31726220 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; + dma_addr_t dest; + src_cnt = unmap->unmap_src_cnt; + dest = iop_desc_get_dest_addr(unmap, iop_chan); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - addr = iop_desc_get_dest_addr(unmap, iop_chan); - dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + enum dma_data_direction dir; + + if (src_cnt > 1) /* is xor? */ + dir = DMA_BIDIRECTIONAL; + else + dir = DMA_FROM_DEVICE; + + dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - src_cnt = unmap->unmap_src_cnt; while (src_cnt--) { addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); + if (addr == dest) + continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 0328da020a1..bcda1742641 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; + dma_addr_t dest; + src_cnt = unmap->unmap_src_cnt; + dest = mv_desc_get_dest_addr(unmap); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - addr = mv_desc_get_dest_addr(unmap); - dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + enum dma_data_direction dir; + + if (src_cnt > 1) /* is xor ? 
*/ + dir = DMA_BIDIRECTIONAL; + else + dir = DMA_FROM_DEVICE; + dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - src_cnt = unmap->unmap_src_cnt; while (src_cnt--) { addr = mv_desc_get_src_addr(unmap, src_cnt); + if (addr == dest) + continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 5fcd3d89c75..4041e914328 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -394,6 +394,12 @@ static void edac_device_workq_function(struct work_struct *work_req) mutex_lock(&device_ctls_mutex); + /* If we are being removed, bail out immediately */ + if (edac_dev->op_state == OP_OFFLINE) { + mutex_unlock(&device_ctls_mutex); + return; + } + /* Only poll controllers that are running polled and have a check */ if ((edac_dev->op_state == OP_RUNNING_POLL) && (edac_dev->edac_check != NULL)) { @@ -585,14 +591,14 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev) /* mark this instance as OFFLINE */ edac_dev->op_state = OP_OFFLINE; - /* clear workq processing on this instance */ - edac_device_workq_teardown(edac_dev); - /* deregister from global list */ del_edac_device_from_global_list(edac_dev); mutex_unlock(&device_ctls_mutex); + /* clear workq processing on this instance */ + edac_device_workq_teardown(edac_dev); + /* Tear down the sysfs entries for this instance */ edac_device_remove_sysfs(edac_dev); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 8daf4793ac3..4a597d8c2f7 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -467,6 +467,17 @@ const char *dmi_get_system_info(int field) } EXPORT_SYMBOL(dmi_get_system_info); +/** + * dmi_name_in_serial - Check if string is in the DMI product serial + * information. + */ +int dmi_name_in_serial(const char *str) +{ + int f = DMI_PRODUCT_SERIAL; + if (dmi_ident[f] && strstr(dmi_ident[f], str)) + return 1; + return 0; +} /** * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 
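The iop-adma and mv_xor hunks a little earlier in this patch make the same completion-time fix: because an XOR destination can also be one of the sources, the destination page may have been read as well as written, so it is now unmapped DMA_BIDIRECTIONAL instead of DMA_FROM_DEVICE, and the source loop skips any entry that aliases the destination so the same page is not unmapped twice. The following is a minimal userspace sketch of that selection logic only; unmap_page() is a stub standing in for dma_unmap_page(), and the addresses are made up.

#include <stdio.h>

enum dma_dir { DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_BIDIRECTIONAL };

/* stand-in for dma_unmap_page(); just logs what the real call would do */
static void unmap_page(unsigned long addr, enum dma_dir dir)
{
	printf("unmap %#lx dir=%d\n", addr, dir);
}

static void unmap_on_complete(unsigned long dest, const unsigned long *src,
			      unsigned int src_cnt)
{
	/* xor (more than one source) may have read the destination too */
	enum dma_dir dir = src_cnt > 1 ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	unsigned int i;

	unmap_page(dest, dir);

	for (i = 0; i < src_cnt; i++) {
		if (src[i] == dest)
			continue;	/* aliases dest, already unmapped above */
		unmap_page(src[i], DMA_TO_DEVICE);
	}
}

int main(void)
{
	/* hypothetical bus addresses; the first source is also the destination */
	unsigned long srcs[] = { 0x1000, 0x2000, 0x3000 };

	unmap_on_complete(0x1000, srcs, 3);
	return 0;
}

With these inputs the sketch unmaps 0x1000 once, bidirectionally, and the remaining two sources as DMA_TO_DEVICE, mirroring what the fixed drivers do for an XOR whose destination doubles as a source.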
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 553dd4bc307..afa8a12cd00 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = dev->pci_device; break; case I915_PARAM_HAS_GEM: - value = 1; + value = dev_priv->has_gem; break; default: DRM_ERROR("Unknown parameter %d\n", param->param); @@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->regs = ioremap(base, size); +#ifdef CONFIG_HIGHMEM64G + /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ + dev_priv->has_gem = 0; +#else + /* enable GEM by default */ + dev_priv->has_gem = 1; +#endif + i915_gem_load(dev); /* Init HWS */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index adc972cc6bf..b3cc4731aa7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -106,6 +106,8 @@ struct intel_opregion { typedef struct drm_i915_private { struct drm_device *dev; + int has_gem; + void __iomem *regs; drm_local_map_t *sarea; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ad672d85482..24fe8c10b4b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2309,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } obj_priv = obj->driver_private; - args->busy = obj_priv->active; + /* Don't count being on the flushing list against the object being + * done. Otherwise, a buffer left on the flushing list but not getting + * flushed (because nobody's flushing that domain) won't ever return + * unbusy and get reused by libdrm's bo cache. The other expected + * consumer of this interface, OpenGL's occlusion queries, also specs + * that the objects get unbusy "eventually" without any interference. 
+ */ + args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 99be11418ac..8289e16419a 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -44,7 +44,7 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) else dev_priv->irq_enable_reg &= ~mask; - if (!dev->irq_enabled) + if (dev->irq_enabled) RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); } @@ -57,7 +57,7 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) else dev_priv->r500_disp_irq_reg &= ~mask; - if (!dev->irq_enabled) + if (dev->irq_enabled) RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); } diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 228f7572306..3fcf78e906d 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -365,6 +365,7 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) pmsg = &msgs[tptr]; if (pmsg->flags & I2C_M_RD) ret = wait_event_interruptible_timeout(cpm->i2c_wait, + (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) || !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), 1 * HZ); else diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 1fac4e23313..b7434d24904 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -56,6 +56,7 @@ enum s3c24xx_i2c_state { struct s3c24xx_i2c { spinlock_t lock; wait_queue_head_t wait; + unsigned int suspended:1; struct i2c_msg *msg; unsigned int msg_num; @@ -507,7 +508,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int unsigned long timeout; int ret; - if (!(readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQEN)) + if (i2c->suspended) return -EIO; ret = s3c24xx_i2c_set_master(i2c); @@ -986,17 +987,26 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) } #ifdef CONFIG_PM +static int s3c24xx_i2c_suspend_late(struct platform_device *dev, + pm_message_t msg) +{ + struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); + i2c->suspended = 1; + return 0; +} + static int s3c24xx_i2c_resume(struct platform_device *dev) { struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); - if (i2c != NULL) - s3c24xx_i2c_init(i2c); + i2c->suspended = 0; + s3c24xx_i2c_init(i2c); return 0; } #else +#define s3c24xx_i2c_suspend_late NULL #define s3c24xx_i2c_resume NULL #endif @@ -1005,6 +1015,7 @@ static int s3c24xx_i2c_resume(struct platform_device *dev) static struct platform_driver s3c2410_i2c_driver = { .probe = s3c24xx_i2c_probe, .remove = s3c24xx_i2c_remove, + .suspend_late = s3c24xx_i2c_suspend_late, .resume = s3c24xx_i2c_resume, .driver = { .owner = THIS_MODULE, @@ -1015,6 +1026,7 @@ static struct platform_driver s3c2410_i2c_driver = { static struct platform_driver s3c2440_i2c_driver = { .probe = s3c24xx_i2c_probe, .remove = s3c24xx_i2c_remove, + .suspend_late = s3c24xx_i2c_suspend_late, .resume = s3c24xx_i2c_resume, .driver = { .owner = THIS_MODULE, diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 3384a717fec..6c3d60b939b 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -160,9 +160,39 @@ struct sh_mobile_i2c_data { static void activate_ch(struct sh_mobile_i2c_data *pd) { + unsigned long i2c_clk; + u_int32_t num; + u_int32_t denom; + u_int32_t tmp; + 
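The activate_ch() hunk that continues just below moves the ICCL/ICCH divider computation to channel-activation time, so the peripheral clock rate is sampled only after clk_enable(). The arithmetic is a rounded integer division, iccl = clk * 5 / (rate * 9) and icch = clk * 4 / (rate * 9), following the 5/4 SCL low/high ratio named in the driver comment. Below is a standalone sketch of just that rounding; the 100 kHz NORMAL_SPEED value and the 33.3 MHz clock are illustrative assumptions, not values taken from this patch.

#include <stdio.h>

#define NORMAL_SPEED 100000UL	/* assumed normal-mode bus speed, Hz */

/* divide and round to the nearest integer, mirroring the driver's *10 trick */
static unsigned char div_round(unsigned long num, unsigned long denom)
{
	unsigned long tmp = num * 10 / denom;

	if (tmp % 10 >= 5)
		return (unsigned char)(num / denom + 1);
	return (unsigned char)(num / denom);
}

int main(void)
{
	unsigned long i2c_clk = 33333333;	/* hypothetical peripheral clock, Hz */
	unsigned long denom = NORMAL_SPEED * 9;
	unsigned char iccl = div_round(i2c_clk * 5, denom);	/* SCL low:  5/9 of period */
	unsigned char icch = div_round(i2c_clk * 4, denom);	/* SCL high: 4/9 of period */

	printf("iccl=%u icch=%u\n", iccl, icch);
	return 0;
}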
/* Make sure the clock is enabled */ clk_enable(pd->clk); + /* Get clock rate after clock is enabled */ + i2c_clk = clk_get_rate(pd->clk); + + /* Calculate the value for iccl. From the data sheet: + * iccl = (p clock / transfer rate) * (L / (L + H)) + * where L and H are the SCL low/high ratio (5/4 in this case). + * We also round off the result. + */ + num = i2c_clk * 5; + denom = NORMAL_SPEED * 9; + tmp = num * 10 / denom; + if (tmp % 10 >= 5) + pd->iccl = (u_int8_t)((num/denom) + 1); + else + pd->iccl = (u_int8_t)(num/denom); + + /* Calculate the value for icch. From the data sheet: + icch = (p clock / transfer rate) * (H / (L + H)) */ + num = i2c_clk * 4; + tmp = num * 10 / denom; + if (tmp % 10 >= 5) + pd->icch = (u_int8_t)((num/denom) + 1); + else + pd->icch = (u_int8_t)(num/denom); + /* Enable channel and configure rx ack */ iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); @@ -459,40 +489,6 @@ static struct i2c_algorithm sh_mobile_i2c_algorithm = { .master_xfer = sh_mobile_i2c_xfer, }; -static void sh_mobile_i2c_setup_channel(struct platform_device *dev) -{ - struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); - unsigned long peripheral_clk = clk_get_rate(pd->clk); - u_int32_t num; - u_int32_t denom; - u_int32_t tmp; - - spin_lock_init(&pd->lock); - init_waitqueue_head(&pd->wait); - - /* Calculate the value for iccl. From the data sheet: - * iccl = (p clock / transfer rate) * (L / (L + H)) - * where L and H are the SCL low/high ratio (5/4 in this case). - * We also round off the result. - */ - num = peripheral_clk * 5; - denom = NORMAL_SPEED * 9; - tmp = num * 10 / denom; - if (tmp % 10 >= 5) - pd->iccl = (u_int8_t)((num/denom) + 1); - else - pd->iccl = (u_int8_t)(num/denom); - - /* Calculate the value for icch. From the data sheet: - icch = (p clock / transfer rate) * (H / (L + H)) */ - num = peripheral_clk * 4; - tmp = num * 10 / denom; - if (tmp % 10 >= 5) - pd->icch = (u_int8_t)((num/denom) + 1); - else - pd->icch = (u_int8_t)(num/denom); -} - static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook) { struct resource *res; @@ -533,6 +529,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) struct sh_mobile_i2c_data *pd; struct i2c_adapter *adap; struct resource *res; + char clk_name[8]; int size; int ret; @@ -542,9 +539,10 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) return -ENOMEM; } - pd->clk = clk_get(&dev->dev, "peripheral_clk"); + snprintf(clk_name, sizeof(clk_name), "i2c%d", dev->id); + pd->clk = clk_get(&dev->dev, clk_name); if (IS_ERR(pd->clk)) { - dev_err(&dev->dev, "cannot get peripheral clock\n"); + dev_err(&dev->dev, "cannot get clock \"%s\"\n", clk_name); ret = PTR_ERR(pd->clk); goto err; } @@ -586,7 +584,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) strlcpy(adap->name, dev->name, sizeof(adap->name)); - sh_mobile_i2c_setup_channel(dev); + spin_lock_init(&pd->lock); + init_waitqueue_head(&pd->wait); ret = i2c_add_numbered_adapter(adap); if (ret < 0) { diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c index 53f079cc00a..d8ede85fe17 100644 --- a/drivers/ide/cs5530.c +++ b/drivers/ide/cs5530.c @@ -81,11 +81,12 @@ static u8 cs5530_udma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; ide_drive_t *mate = ide_get_pair_dev(drive); - u16 *mateid = mate->id; + u16 *mateid; u8 mask = hwif->ultra_mask; if (mate == NULL) goto out; + mateid = mate->id; if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { if ((mateid[ATA_ID_FIELD_VALID] & 4) && diff --git a/drivers/ide/sc1200.c 
b/drivers/ide/sc1200.c index f1a8758e3a9..ec7f766ef5e 100644 --- a/drivers/ide/sc1200.c +++ b/drivers/ide/sc1200.c @@ -104,11 +104,12 @@ static u8 sc1200_udma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; ide_drive_t *mate = ide_get_pair_dev(drive); - u16 *mateid = mate->id; + u16 *mateid; u8 mask = hwif->ultra_mask; if (mate == NULL) goto out; + mateid = mate->id; if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) { if ((mateid[ATA_ID_FIELD_VALID] & 4) && diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index d333ae22459..79ef5fd928a 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -115,8 +115,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length, return error; } +#define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db + static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci) { + /* Freecom FireWire Hard Drive firmware bug */ + if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH) + return 0; + return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3; } diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 09a2bec7fd3..d98b05b2826 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -41,6 +41,8 @@ #include <net/neighbour.h> #include <net/route.h> #include <net/netevent.h> +#include <net/addrconf.h> +#include <net/ip6_route.h> #include <rdma/ib_addr.h> MODULE_AUTHOR("Sean Hefty"); @@ -49,8 +51,8 @@ MODULE_LICENSE("Dual BSD/GPL"); struct addr_req { struct list_head list; - struct sockaddr src_addr; - struct sockaddr dst_addr; + struct sockaddr_storage src_addr; + struct sockaddr_storage dst_addr; struct rdma_dev_addr *addr; struct rdma_addr_client *client; void *context; @@ -113,15 +115,32 @@ EXPORT_SYMBOL(rdma_copy_addr); int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { struct net_device *dev; - __be32 ip = ((struct sockaddr_in *) addr)->sin_addr.s_addr; - int ret; + int ret = -EADDRNOTAVAIL; - dev = ip_dev_find(&init_net, ip); - if (!dev) - return -EADDRNOTAVAIL; + switch (addr->sa_family) { + case AF_INET: + dev = ip_dev_find(&init_net, + ((struct sockaddr_in *) addr)->sin_addr.s_addr); + + if (!dev) + return ret; - ret = rdma_copy_addr(dev_addr, dev, NULL); - dev_put(dev); + ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_put(dev); + break; + case AF_INET6: + for_each_netdev(&init_net, dev) { + if (ipv6_chk_addr(&init_net, + &((struct sockaddr_in6 *) addr)->sin6_addr, + dev, 1)) { + ret = rdma_copy_addr(dev_addr, dev, NULL); + break; + } + } + break; + default: + break; + } return ret; } EXPORT_SYMBOL(rdma_translate_ip); @@ -156,22 +175,37 @@ static void queue_req(struct addr_req *req) mutex_unlock(&lock); } -static void addr_send_arp(struct sockaddr_in *dst_in) +static void addr_send_arp(struct sockaddr *dst_in) { struct rtable *rt; struct flowi fl; - __be32 dst_ip = dst_in->sin_addr.s_addr; + struct dst_entry *dst; memset(&fl, 0, sizeof fl); - fl.nl_u.ip4_u.daddr = dst_ip; - if (ip_route_output_key(&init_net, &rt, &fl)) - return; + if (dst_in->sa_family == AF_INET) { + fl.nl_u.ip4_u.daddr = + ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; - neigh_event_send(rt->u.dst.neighbour, NULL); - ip_rt_put(rt); + if (ip_route_output_key(&init_net, &rt, &fl)) + return; + + neigh_event_send(rt->u.dst.neighbour, NULL); + ip_rt_put(rt); + + } else { + fl.nl_u.ip6_u.daddr = + ((struct sockaddr_in6 *) dst_in)->sin6_addr; + + dst = ip6_route_output(&init_net, NULL, &fl); + if (!dst) + return; + + 
neigh_event_send(dst->neighbour, NULL); + dst_release(dst); + } } -static int addr_resolve_remote(struct sockaddr_in *src_in, +static int addr4_resolve_remote(struct sockaddr_in *src_in, struct sockaddr_in *dst_in, struct rdma_dev_addr *addr) { @@ -220,10 +254,51 @@ out: return ret; } +static int addr6_resolve_remote(struct sockaddr_in6 *src_in, + struct sockaddr_in6 *dst_in, + struct rdma_dev_addr *addr) +{ + struct flowi fl; + struct neighbour *neigh; + struct dst_entry *dst; + int ret = -ENODATA; + + memset(&fl, 0, sizeof fl); + fl.nl_u.ip6_u.daddr = dst_in->sin6_addr; + fl.nl_u.ip6_u.saddr = src_in->sin6_addr; + + dst = ip6_route_output(&init_net, NULL, &fl); + if (!dst) + return ret; + + if (dst->dev->flags & IFF_NOARP) { + ret = rdma_copy_addr(addr, dst->dev, NULL); + } else { + neigh = dst->neighbour; + if (neigh && (neigh->nud_state & NUD_VALID)) + ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); + } + + dst_release(dst); + return ret; +} + +static int addr_resolve_remote(struct sockaddr *src_in, + struct sockaddr *dst_in, + struct rdma_dev_addr *addr) +{ + if (src_in->sa_family == AF_INET) { + return addr4_resolve_remote((struct sockaddr_in *) src_in, + (struct sockaddr_in *) dst_in, addr); + } else + return addr6_resolve_remote((struct sockaddr_in6 *) src_in, + (struct sockaddr_in6 *) dst_in, addr); +} + static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; - struct sockaddr_in *src_in, *dst_in; + struct sockaddr *src_in, *dst_in; struct list_head done_list; INIT_LIST_HEAD(&done_list); @@ -231,8 +306,8 @@ static void process_req(struct work_struct *work) mutex_lock(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->status == -ENODATA) { - src_in = (struct sockaddr_in *) &req->src_addr; - dst_in = (struct sockaddr_in *) &req->dst_addr; + src_in = (struct sockaddr *) &req->src_addr; + dst_in = (struct sockaddr *) &req->dst_addr; req->status = addr_resolve_remote(src_in, dst_in, req->addr); if (req->status && time_after_eq(jiffies, req->timeout)) @@ -251,41 +326,72 @@ static void process_req(struct work_struct *work) list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); - req->callback(req->status, &req->src_addr, req->addr, - req->context); + req->callback(req->status, (struct sockaddr *) &req->src_addr, + req->addr, req->context); put_client(req->client); kfree(req); } } -static int addr_resolve_local(struct sockaddr_in *src_in, - struct sockaddr_in *dst_in, +static int addr_resolve_local(struct sockaddr *src_in, + struct sockaddr *dst_in, struct rdma_dev_addr *addr) { struct net_device *dev; - __be32 src_ip = src_in->sin_addr.s_addr; - __be32 dst_ip = dst_in->sin_addr.s_addr; int ret; - dev = ip_dev_find(&init_net, dst_ip); - if (!dev) - return -EADDRNOTAVAIL; - - if (ipv4_is_zeronet(src_ip)) { - src_in->sin_family = dst_in->sin_family; - src_in->sin_addr.s_addr = dst_ip; - ret = rdma_copy_addr(addr, dev, dev->dev_addr); - } else if (ipv4_is_loopback(src_ip)) { - ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); - if (!ret) - memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + if (dst_in->sa_family == AF_INET) { + __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr; + __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; + + dev = ip_dev_find(&init_net, dst_ip); + if (!dev) + return -EADDRNOTAVAIL; + + if (ipv4_is_zeronet(src_ip)) { + src_in->sa_family = dst_in->sa_family; + ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip; + ret = 
rdma_copy_addr(addr, dev, dev->dev_addr); + } else if (ipv4_is_loopback(src_ip)) { + ret = rdma_translate_ip(dst_in, addr); + if (!ret) + memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + } else { + ret = rdma_translate_ip(src_in, addr); + if (!ret) + memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + } + dev_put(dev); } else { - ret = rdma_translate_ip((struct sockaddr *)src_in, addr); - if (!ret) - memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + struct in6_addr *a; + + for_each_netdev(&init_net, dev) + if (ipv6_chk_addr(&init_net, + &((struct sockaddr_in6 *) addr)->sin6_addr, + dev, 1)) + break; + + if (!dev) + return -EADDRNOTAVAIL; + + a = &((struct sockaddr_in6 *) src_in)->sin6_addr; + + if (ipv6_addr_any(a)) { + src_in->sa_family = dst_in->sa_family; + ((struct sockaddr_in6 *) src_in)->sin6_addr = + ((struct sockaddr_in6 *) dst_in)->sin6_addr; + ret = rdma_copy_addr(addr, dev, dev->dev_addr); + } else if (ipv6_addr_loopback(a)) { + ret = rdma_translate_ip(dst_in, addr); + if (!ret) + memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + } else { + ret = rdma_translate_ip(src_in, addr); + if (!ret) + memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); + } } - dev_put(dev); return ret; } @@ -296,7 +402,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, struct rdma_dev_addr *addr, void *context), void *context) { - struct sockaddr_in *src_in, *dst_in; + struct sockaddr *src_in, *dst_in; struct addr_req *req; int ret = 0; @@ -313,8 +419,8 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->client = client; atomic_inc(&client->refcount); - src_in = (struct sockaddr_in *) &req->src_addr; - dst_in = (struct sockaddr_in *) &req->dst_addr; + src_in = (struct sockaddr *) &req->src_addr; + dst_in = (struct sockaddr *) &req->dst_addr; req->status = addr_resolve_local(src_in, dst_in, addr); if (req->status == -EADDRNOTAVAIL) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d951896ff7f..2a2e50871b4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -42,6 +42,7 @@ #include <linux/inetdevice.h> #include <net/tcp.h> +#include <net/ipv6.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> @@ -636,7 +637,12 @@ static inline int cma_zero_addr(struct sockaddr *addr) static inline int cma_loopback_addr(struct sockaddr *addr) { - return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); + if (addr->sa_family == AF_INET) + return ipv4_is_loopback( + ((struct sockaddr_in *) addr)->sin_addr.s_addr); + else + return ipv6_addr_loopback( + &((struct sockaddr_in6 *) addr)->sin6_addr); } static inline int cma_any_addr(struct sockaddr *addr) @@ -1467,10 +1473,10 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af) { - struct sockaddr_in addr_in; + struct sockaddr_storage addr_in; memset(&addr_in, 0, sizeof addr_in); - addr_in.sin_family = af; + addr_in.ss_family = af; return rdma_bind_addr(id, (struct sockaddr *) &addr_in); } @@ -2073,7 +2079,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) struct rdma_id_private *id_priv; int ret; - if (addr->sa_family != AF_INET) + if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) return -EAFNOSUPPORT; id_priv = container_of(id, struct rdma_id_private, id); @@ -2113,31 +2119,59 @@ EXPORT_SYMBOL(rdma_bind_addr); static int cma_format_hdr(void *hdr, enum rdma_port_space ps, struct rdma_route *route) { - struct sockaddr_in *src4, *dst4; 
struct cma_hdr *cma_hdr; struct sdp_hh *sdp_hdr; - src4 = (struct sockaddr_in *) &route->addr.src_addr; - dst4 = (struct sockaddr_in *) &route->addr.dst_addr; - - switch (ps) { - case RDMA_PS_SDP: - sdp_hdr = hdr; - if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) - return -EINVAL; - sdp_set_ip_ver(sdp_hdr, 4); - sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; - sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; - sdp_hdr->port = src4->sin_port; - break; - default: - cma_hdr = hdr; - cma_hdr->cma_version = CMA_VERSION; - cma_set_ip_ver(cma_hdr, 4); - cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; - cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; - cma_hdr->port = src4->sin_port; - break; + if (route->addr.src_addr.ss_family == AF_INET) { + struct sockaddr_in *src4, *dst4; + + src4 = (struct sockaddr_in *) &route->addr.src_addr; + dst4 = (struct sockaddr_in *) &route->addr.dst_addr; + + switch (ps) { + case RDMA_PS_SDP: + sdp_hdr = hdr; + if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) + return -EINVAL; + sdp_set_ip_ver(sdp_hdr, 4); + sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; + sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; + sdp_hdr->port = src4->sin_port; + break; + default: + cma_hdr = hdr; + cma_hdr->cma_version = CMA_VERSION; + cma_set_ip_ver(cma_hdr, 4); + cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; + cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; + cma_hdr->port = src4->sin_port; + break; + } + } else { + struct sockaddr_in6 *src6, *dst6; + + src6 = (struct sockaddr_in6 *) &route->addr.src_addr; + dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; + + switch (ps) { + case RDMA_PS_SDP: + sdp_hdr = hdr; + if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) + return -EINVAL; + sdp_set_ip_ver(sdp_hdr, 6); + sdp_hdr->src_addr.ip6 = src6->sin6_addr; + sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; + sdp_hdr->port = src6->sin6_port; + break; + default: + cma_hdr = hdr; + cma_hdr->cma_version = CMA_VERSION; + cma_set_ip_ver(cma_hdr, 6); + cma_hdr->src_addr.ip6 = src6->sin6_addr; + cma_hdr->dst_addr.ip6 = dst6->sin6_addr; + cma_hdr->port = src6->sin6_port; + break; + } } return 0; } diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 7fc35cf0cdd..c825142a2fb 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h @@ -175,6 +175,13 @@ struct ehca_queue_map { unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */ }; +/* function to calculate the next index for the qmap */ +static inline unsigned int next_index(unsigned int cur_index, unsigned int limit) +{ + unsigned int temp = cur_index + 1; + return (temp == limit) ? 
0 : temp; +} + struct ehca_qp { union { struct ib_qp ib_qp; diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index 49660dfa186..523e733c630 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c @@ -113,7 +113,7 @@ int ehca_create_eq(struct ehca_shca *shca, if (h_ret != H_SUCCESS || vpage) goto create_eq_exit2; } else { - if (h_ret != H_PAGE_REGISTERED || !vpage) + if (h_ret != H_PAGE_REGISTERED) goto create_eq_exit2; } } diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index bec7e024935..3b77b674cbf 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -717,6 +717,7 @@ static int __devinit ehca_probe(struct of_device *dev, const u64 *handle; struct ib_pd *ibpd; int ret, i, eq_size; + unsigned long flags; handle = of_get_property(dev->node, "ibm,hca-handle", NULL); if (!handle) { @@ -830,9 +831,9 @@ static int __devinit ehca_probe(struct of_device *dev, ehca_err(&shca->ib_device, "Cannot create device attributes ret=%d", ret); - spin_lock(&shca_list_lock); + spin_lock_irqsave(&shca_list_lock, flags); list_add(&shca->shca_list, &shca_list); - spin_unlock(&shca_list_lock); + spin_unlock_irqrestore(&shca_list_lock, flags); return 0; @@ -878,6 +879,7 @@ probe1: static int __devexit ehca_remove(struct of_device *dev) { struct ehca_shca *shca = dev->dev.driver_data; + unsigned long flags; int ret; sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp); @@ -915,9 +917,9 @@ static int __devexit ehca_remove(struct of_device *dev) ib_dealloc_device(&shca->ib_device); - spin_lock(&shca_list_lock); + spin_lock_irqsave(&shca_list_lock, flags); list_del(&shca->shca_list); - spin_unlock(&shca_list_lock); + spin_unlock_irqrestore(&shca_list_lock, flags); return ret; } @@ -975,6 +977,7 @@ static int ehca_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { static unsigned long ehca_dmem_warn_time; + unsigned long flags; switch (action) { case MEM_CANCEL_OFFLINE: @@ -985,12 +988,12 @@ static int ehca_mem_notifier(struct notifier_block *nb, case MEM_GOING_ONLINE: case MEM_GOING_OFFLINE: /* only ok if no hca is attached to the lpar */ - spin_lock(&shca_list_lock); + spin_lock_irqsave(&shca_list_lock, flags); if (list_empty(&shca_list)) { - spin_unlock(&shca_list_lock); + spin_unlock_irqrestore(&shca_list_lock, flags); return NOTIFY_OK; } else { - spin_unlock(&shca_list_lock); + spin_unlock_irqrestore(&shca_list_lock, flags); if (printk_timed_ratelimit(&ehca_dmem_warn_time, 30 * 1000)) ehca_gen_err("DMEM operations are not allowed" diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index cadbf0cdd91..f161cf173db 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue, return -EFAULT; } - tail_idx = (qmap->tail + 1) % qmap->entries; + tail_idx = next_index(qmap->tail, qmap->entries); wqe_idx = q_ofs / ipz_queue->qe_size; /* check all processed wqes, whether a cqe is requested or not */ while (tail_idx != wqe_idx) { if (qmap->map[tail_idx].cqe_req) qmap->left_to_poll++; - tail_idx = (tail_idx + 1) % qmap->entries; + tail_idx = next_index(tail_idx, qmap->entries); } /* save index in queue, where we have to start flushing */ qmap->next_wqe_idx = wqe_idx; @@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca) } else { 
spin_lock_irqsave(&my_qp->send_cq->spinlock, flags); my_qp->sq_map.left_to_poll = 0; - my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) % - my_qp->sq_map.entries; + my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail, + my_qp->sq_map.entries); spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags); spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags); my_qp->rq_map.left_to_poll = 0; - my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) % - my_qp->rq_map.entries; + my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail, + my_qp->rq_map.entries); spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags); } diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 00a648f4316..c7112686782 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -726,13 +726,13 @@ repoll: * set left_to_poll to 0 because in error state, we will not * get any additional CQEs */ - my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) % - my_qp->sq_map.entries; + my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail, + my_qp->sq_map.entries); my_qp->sq_map.left_to_poll = 0; ehca_add_to_err_list(my_qp, 1); - my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) % - my_qp->rq_map.entries; + my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail, + my_qp->rq_map.entries); my_qp->rq_map.left_to_poll = 0; if (HAS_RQ(my_qp)) ehca_add_to_err_list(my_qp, 0); @@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, /* mark as reported and advance next_wqe pointer */ qmap_entry->reported = 1; - qmap->next_wqe_idx++; - if (qmap->next_wqe_idx == qmap->entries) - qmap->next_wqe_idx = 0; + qmap->next_wqe_idx = next_index(qmap->next_wqe_idx, + qmap->entries); qmap_entry = &qmap->map[qmap->next_wqe_idx]; wc++; nr++; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index ad0aab60b05..69c0ce321b4 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -661,6 +661,8 @@ bail: static void __devexit cleanup_device(struct ipath_devdata *dd) { int port; + struct ipath_portdata **tmp; + unsigned long flags; if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { /* can't do anything more with chip; needs re-init */ @@ -742,20 +744,21 @@ static void __devexit cleanup_device(struct ipath_devdata *dd) /* * free any resources still in use (usually just kernel ports) - * at unload; we do for portcnt, not cfgports, because cfgports - * could have changed while we were loaded. + * at unload; we do for portcnt, because that's what we allocate. + * We acquire lock to be really paranoid that ipath_pd isn't being + * accessed from some interrupt-related code (that should not happen, + * but best to be sure). 
*/ + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); + tmp = dd->ipath_pd; + dd->ipath_pd = NULL; + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); for (port = 0; port < dd->ipath_portcnt; port++) { - struct ipath_portdata *pd = dd->ipath_pd[port]; - dd->ipath_pd[port] = NULL; + struct ipath_portdata *pd = tmp[port]; + tmp[port] = NULL; /* debugging paranoia */ ipath_free_pddata(dd, pd); } - kfree(dd->ipath_pd); - /* - * debuggability, in case some cleanup path tries to use it - * after this - */ - dd->ipath_pd = NULL; + kfree(tmp); } static void __devexit ipath_remove_one(struct pci_dev *pdev) @@ -2586,6 +2589,7 @@ int ipath_reset_device(int unit) { int ret, i; struct ipath_devdata *dd = ipath_lookup(unit); + unsigned long flags; if (!dd) { ret = -ENODEV; @@ -2611,18 +2615,21 @@ int ipath_reset_device(int unit) goto bail; } + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); if (dd->ipath_pd) for (i = 1; i < dd->ipath_cfgports; i++) { - if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { - ipath_dbg("unit %u port %d is in use " - "(PID %u cmd %s), can't reset\n", - unit, i, - pid_nr(dd->ipath_pd[i]->port_pid), - dd->ipath_pd[i]->port_comm); - ret = -EBUSY; - goto bail; - } + if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) + continue; + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); + ipath_dbg("unit %u port %d is in use " + "(PID %u cmd %s), can't reset\n", + unit, i, + pid_nr(dd->ipath_pd[i]->port_pid), + dd->ipath_pd[i]->port_comm); + ret = -EBUSY; + goto bail; } + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); if (dd->ipath_flags & IPATH_HAS_SEND_DMA) teardown_sdma(dd); @@ -2656,9 +2663,12 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) { int i, sub, any = 0; struct pid *pid; + unsigned long flags; if (!dd->ipath_pd) return 0; + + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); for (i = 1; i < dd->ipath_cfgports; i++) { if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) continue; @@ -2682,6 +2692,7 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) any++; } } + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); return any; } diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 1af1f3a907c..239d4e8068a 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -223,8 +223,13 @@ static int ipath_get_base_info(struct file *fp, (unsigned long long) kinfo->spi_subport_rcvhdr_base); } - kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / - dd->ipath_palign; + /* + * All user buffers are 2KB buffers. If we ever support + * giving 4KB buffers to user processes, this will need some + * work. 
+ */ + kinfo->spi_pioindex = (kinfo->spi_piobufbase - + (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; kinfo->spi_pioalign = dd->ipath_palign; kinfo->spi_qpair = IPATH_KD_QP; @@ -2041,7 +2046,9 @@ static int ipath_close(struct inode *in, struct file *fp) struct ipath_filedata *fd; struct ipath_portdata *pd; struct ipath_devdata *dd; + unsigned long flags; unsigned port; + struct pid *pid; ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", (long)in->i_rdev, fp->private_data); @@ -2074,14 +2081,13 @@ static int ipath_close(struct inode *in, struct file *fp) mutex_unlock(&ipath_mutex); goto bail; } + /* early; no interrupt users after this */ + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); port = pd->port_port; - - if (pd->port_hdrqfull) { - ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " - "during run\n", pd->port_comm, pid_nr(pd->port_pid), - pd->port_hdrqfull); - pd->port_hdrqfull = 0; - } + dd->ipath_pd[port] = NULL; + pid = pd->port_pid; + pd->port_pid = NULL; + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); if (pd->port_rcvwait_to || pd->port_piowait_to || pd->port_rcvnowait || pd->port_pionowait) { @@ -2138,13 +2144,11 @@ static int ipath_close(struct inode *in, struct file *fp) unlock_expected_tids(pd); ipath_stats.sps_ports--; ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", - pd->port_comm, pid_nr(pd->port_pid), + pd->port_comm, pid_nr(pid), dd->ipath_unit, port); } - put_pid(pd->port_pid); - pd->port_pid = NULL; - dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ + put_pid(pid); mutex_unlock(&ipath_mutex); ipath_free_pddata(dd, pd); /* after releasing the mutex */ diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 8bb5170b4e4..53912c327bf 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -86,7 +86,7 @@ static int create_file(const char *name, mode_t mode, *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) + if (!IS_ERR(*dentry)) error = ipathfs_mknod(parent->d_inode, *dentry, mode, fops, data); else diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index 421cc2af891..fbf8c5379ea 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c @@ -721,6 +721,12 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) INFINIPATH_HWE_SERDESPLLFAILED); } + dd->ibdeltainprog = 1; + dd->ibsymsnap = + ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); + dd->iblnkerrsnap = + ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); + val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); @@ -810,6 +816,36 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) { u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); + if (dd->ibsymdelta || dd->iblnkerrdelta || + dd->ibdeltainprog) { + u64 diagc; + /* enable counter writes */ + diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); + ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, + diagc | INFINIPATH_DC_COUNTERWREN); + + if (dd->ibsymdelta || dd->ibdeltainprog) { + val = ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt); + if (dd->ibdeltainprog) + val -= val - dd->ibsymsnap; + val -= dd->ibsymdelta; + ipath_write_creg(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt, val); + } + if 
(dd->iblnkerrdelta || dd->ibdeltainprog) { + val = ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt); + if (dd->ibdeltainprog) + val -= val - dd->iblnkerrsnap; + val -= dd->iblnkerrdelta; + ipath_write_creg(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt, val); + } + + /* and disable counter writes */ + ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); + } val |= INFINIPATH_SERDC0_TXIDLE; ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", (unsigned long long) val); @@ -1749,6 +1785,31 @@ static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b) static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) { + if (ibup) { + if (dd->ibdeltainprog) { + dd->ibdeltainprog = 0; + dd->ibsymdelta += + ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt) - + dd->ibsymsnap; + dd->iblnkerrdelta += + ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt) - + dd->iblnkerrsnap; + } + } else { + dd->ipath_lli_counter = 0; + if (!dd->ibdeltainprog) { + dd->ibdeltainprog = 1; + dd->ibsymsnap = + ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt); + dd->iblnkerrsnap = + ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt); + } + } + ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), ipath_ib_linktrstate(dd, ibcs)); return 0; diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c index 9839e20119b..b2a9d4c155d 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c @@ -951,6 +951,12 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd) INFINIPATH_HWE_SERDESPLLFAILED); } + dd->ibdeltainprog = 1; + dd->ibsymsnap = + ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); + dd->iblnkerrsnap = + ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); + if (!dd->ipath_ibcddrctrl) { /* not on re-init after reset */ dd->ipath_ibcddrctrl = @@ -1084,6 +1090,37 @@ static void ipath_7220_config_jint(struct ipath_devdata *dd, static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) { u64 val; + if (dd->ibsymdelta || dd->iblnkerrdelta || + dd->ibdeltainprog) { + u64 diagc; + /* enable counter writes */ + diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); + ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, + diagc | INFINIPATH_DC_COUNTERWREN); + + if (dd->ibsymdelta || dd->ibdeltainprog) { + val = ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt); + if (dd->ibdeltainprog) + val -= val - dd->ibsymsnap; + val -= dd->ibsymdelta; + ipath_write_creg(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt, val); + } + if (dd->iblnkerrdelta || dd->ibdeltainprog) { + val = ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt); + if (dd->ibdeltainprog) + val -= val - dd->iblnkerrsnap; + val -= dd->iblnkerrdelta; + ipath_write_creg(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt, val); + } + + /* and disable counter writes */ + ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); + } + dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; wake_up(&dd->ipath_autoneg_wait); cancel_delayed_work(&dd->ipath_autoneg_work); @@ -2325,7 +2362,7 @@ static void try_auto_neg(struct ipath_devdata *dd) static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) { - int ret = 0; + int ret = 0, symadj = 0; u32 ltstate = ipath_ib_linkstate(dd, ibcs); dd->ipath_link_width_active = @@ -2368,6 +2405,13 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) ipath_dbg("DDR 
negotiation try, %u/%u\n", dd->ipath_autoneg_tries, IPATH_AUTONEG_TRIES); + if (!dd->ibdeltainprog) { + dd->ibdeltainprog = 1; + dd->ibsymsnap = ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt); + dd->iblnkerrsnap = ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt); + } try_auto_neg(dd); ret = 1; /* no other IB status change processing */ } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) @@ -2388,6 +2432,7 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) set_speed_fast(dd, dd->ipath_link_speed_enabled); wake_up(&dd->ipath_autoneg_wait); + symadj = 1; } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { /* * clear autoneg failure flag, and do setup @@ -2403,22 +2448,28 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) IBA7220_IBC_IBTA_1_2_MASK; ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0); + symadj = 1; } } /* - * if we are in 1X, and are in autoneg width, it - * could be due to an xgxs problem, so if we haven't + * if we are in 1X on rev1 only, and are in autoneg width, + * it could be due to an xgxs problem, so if we haven't * already tried, try twice to get to 4X; if we * tried, and couldn't, report it, since it will * probably not be what is desired. */ - if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X | + if (dd->ipath_minrev == 1 && + (dd->ipath_link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) && dd->ipath_link_width_active == IB_WIDTH_1X && dd->ipath_x1_fix_tries < 3) { - if (++dd->ipath_x1_fix_tries == 3) + if (++dd->ipath_x1_fix_tries == 3) { dev_info(&dd->pcidev->dev, "IB link is in 1X mode\n"); + if (!(dd->ipath_flags & + IPATH_IB_AUTONEG_INPROG)) + symadj = 1; + } else { ipath_cdbg(VERBOSE, "IB 1X in " "auto-width, try %u to be " @@ -2429,7 +2480,8 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) dd->ipath_f_xgxs_reset(dd); ret = 1; /* skip other processing */ } - } + } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) + symadj = 1; if (!ret) { dd->delay_mult = rate_to_delay @@ -2440,6 +2492,25 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) } } + if (symadj) { + if (dd->ibdeltainprog) { + dd->ibdeltainprog = 0; + dd->ibsymdelta += ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt) - + dd->ibsymsnap; + dd->iblnkerrdelta += ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt) - + dd->iblnkerrsnap; + } + } else if (!ibup && !dd->ibdeltainprog + && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { + dd->ibdeltainprog = 1; + dd->ibsymsnap = ipath_read_creg32(dd, + dd->ipath_cregs->cr_ibsymbolerrcnt); + dd->iblnkerrsnap = ipath_read_creg32(dd, + dd->ipath_cregs->cr_iblinkerrrecovcnt); + } + if (!ret) ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), ltstate); diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 3e5baa43fc8..64aeefbd2a5 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -229,6 +229,7 @@ static int init_chip_first(struct ipath_devdata *dd) spin_lock_init(&dd->ipath_kernel_tid_lock); spin_lock_init(&dd->ipath_user_tid_lock); spin_lock_init(&dd->ipath_sendctrl_lock); + spin_lock_init(&dd->ipath_uctxt_lock); spin_lock_init(&dd->ipath_sdma_lock); spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_eep_st_lock); diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h 
b/drivers/infiniband/hw/ipath/ipath_kernel.h index 0bd8bcb184a..6ba4861dd6a 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -355,6 +355,19 @@ struct ipath_devdata { /* errors masked because they occur too fast */ ipath_err_t ipath_maskederrs; u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */ + /* these 5 fields are used to establish deltas for IB Symbol + * errors and linkrecovery errors. They can be reported on + * some chips during link negotiation prior to INIT, and with + * DDR when faking DDR negotiations with non-IBTA switches. + * The chip counters are adjusted at driver unload if there is + * a non-zero delta. + */ + u64 ibdeltainprog; + u64 ibsymdelta; + u64 ibsymsnap; + u64 iblnkerrdelta; + u64 iblnkerrsnap; + /* time in jiffies at which to re-enable maskederrs */ unsigned long ipath_unmasktime; /* count of egrfull errors, combined for all ports */ @@ -464,6 +477,8 @@ struct ipath_devdata { spinlock_t ipath_kernel_tid_lock; spinlock_t ipath_user_tid_lock; spinlock_t ipath_sendctrl_lock; + /* around ipath_pd and (user ports) port_cnt use (intr vs free) */ + spinlock_t ipath_uctxt_lock; /* * IPATH_STATUS_*, diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index 8f32b17a5ee..c0e933fec21 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c @@ -132,6 +132,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, * (see ipath_get_dma_mr and ipath_dma.c). */ if (sge->lkey == 0) { + /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { @@ -211,6 +212,7 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, * (see ipath_get_dma_mr and ipath_dma.c). 
*/ if (rkey == 0) { + /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index be4fc9ada8e..17a12319747 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -348,6 +348,7 @@ bail: */ static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) { + /* always a kernel port, no locking needed */ struct ipath_portdata *pd = dd->ipath_pd[0]; memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); @@ -730,6 +731,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) int i; int changed = 0; + /* always a kernel port, no locking needed */ pd = dd->ipath_pd[0]; for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 4715911101e..3a5a89b609c 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c @@ -745,6 +745,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, struct ipath_swqe *swq = NULL; struct ipath_ibdev *dev; size_t sz; + size_t sg_list_sz; struct ib_qp *ret; if (init_attr->create_flags) { @@ -789,19 +790,31 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, goto bail; } sz = sizeof(*qp); + sg_list_sz = 0; if (init_attr->srq) { struct ipath_srq *srq = to_isrq(init_attr->srq); - sz += sizeof(*qp->r_sg_list) * - srq->rq.max_sge; - } else - sz += sizeof(*qp->r_sg_list) * - init_attr->cap.max_recv_sge; - qp = kmalloc(sz, GFP_KERNEL); + if (srq->rq.max_sge > 1) + sg_list_sz = sizeof(*qp->r_sg_list) * + (srq->rq.max_sge - 1); + } else if (init_attr->cap.max_recv_sge > 1) + sg_list_sz = sizeof(*qp->r_sg_list) * + (init_attr->cap.max_recv_sge - 1); + qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); if (!qp) { ret = ERR_PTR(-ENOMEM); goto bail_swq; } + if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD || + init_attr->qp_type == IB_QPT_SMI || + init_attr->qp_type == IB_QPT_GSI)) { + qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); + if (!qp->r_ud_sg_list) { + ret = ERR_PTR(-ENOMEM); + goto bail_qp; + } + } else + qp->r_ud_sg_list = NULL; if (init_attr->srq) { sz = 0; qp->r_rq.size = 0; @@ -818,7 +831,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, qp->r_rq.size * sz); if (!qp->r_rq.wq) { ret = ERR_PTR(-ENOMEM); - goto bail_qp; + goto bail_sg_list; } } @@ -848,7 +861,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, if (err) { ret = ERR_PTR(err); vfree(qp->r_rq.wq); - goto bail_qp; + goto bail_sg_list; } qp->ip = NULL; qp->s_tx = NULL; @@ -925,6 +938,8 @@ bail_ip: vfree(qp->r_rq.wq); ipath_free_qp(&dev->qp_table, qp); free_qpn(&dev->qp_table, qp->ibqp.qp_num); +bail_sg_list: + kfree(qp->r_ud_sg_list); bail_qp: kfree(qp); bail_swq: @@ -989,6 +1004,7 @@ int ipath_destroy_qp(struct ib_qp *ibqp) kref_put(&qp->ip->ref, ipath_release_mmap_info); else vfree(qp->r_rq.wq); + kfree(qp->r_ud_sg_list); vfree(qp->s_wq); kfree(qp); return 0; diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 7b93cda1a4b..9170710b950 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c @@ -573,9 +573,8 @@ int ipath_make_rc_req(struct ipath_qp *qp) ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); qp->s_state = OP(RDMA_READ_REQUEST); hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); - bth2 = qp->s_psn++ & IPATH_PSN_MASK; - if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = 
qp->s_psn; + bth2 = qp->s_psn & IPATH_PSN_MASK; + qp->s_psn = wqe->lpsn + 1; ss = NULL; len = 0; qp->s_cur++; diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index 284c9bca517..8e255adf5d9 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c @@ -698,10 +698,8 @@ retry: addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, tx->map_len, DMA_TO_DEVICE); - if (dma_mapping_error(&dd->pcidev->dev, addr)) { - ret = -EIO; - goto unlock; - } + if (dma_mapping_error(&dd->pcidev->dev, addr)) + goto ioerr; dwoffset = tx->map_len >> 2; make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); @@ -741,6 +739,8 @@ retry: dw = (len + 3) >> 2; addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, DMA_TO_DEVICE); + if (dma_mapping_error(&dd->pcidev->dev, addr)) + goto unmap; make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); /* SDmaUseLargeBuf has to be set in every descriptor */ if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) @@ -798,7 +798,18 @@ retry: list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_enq(dd); - + goto unlock; + +unmap: + while (tail != dd->ipath_sdma_descq_tail) { + if (!tail) + tail = dd->ipath_sdma_descq_cnt - 1; + else + tail--; + unmap_desc(dd, tail); + } +ioerr: + ret = -EIO; unlock: spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); fail: diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index c8e3d65f0de..f63e143e329 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c @@ -112,6 +112,14 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg) dd->ipath_lastrpkts = val; } val64 = dd->ipath_rpkts; + } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) { + if (dd->ibdeltainprog) + val64 -= val64 - dd->ibsymsnap; + val64 -= dd->ibsymdelta; + } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) { + if (dd->ibdeltainprog) + val64 -= val64 - dd->iblnkerrsnap; + val64 -= dd->iblnkerrdelta; } else val64 = (u64) val; diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 729446f56aa..91c74cc797a 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c @@ -70,8 +70,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) goto done; } - rsge.sg_list = NULL; - /* * Check that the qkey matches (except for QP0, see 9.6.1.4.1). * Qkeys with the high order bit set mean use the @@ -115,21 +113,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) rq = &qp->r_rq; } - if (rq->max_sge > 1) { - /* - * XXX We could use GFP_KERNEL if ipath_do_send() - * was always called from the tasklet instead of - * from ipath_post_send(). - */ - rsge.sg_list = kmalloc((rq->max_sge - 1) * - sizeof(struct ipath_sge), - GFP_ATOMIC); - if (!rsge.sg_list) { - dev->n_pkt_drops++; - goto drop; - } - } - /* * Get the next work request entry to find where to put the data. 
* Note that it is safe to drop the lock after changing rq->tail @@ -147,6 +130,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) goto drop; } wqe = get_rwqe_ptr(rq, tail); + rsge.sg_list = qp->r_ud_sg_list; if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { spin_unlock_irqrestore(&rq->lock, flags); dev->n_pkt_drops++; @@ -242,7 +226,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, swqe->wr.send_flags & IB_SEND_SOLICITED); drop: - kfree(rsge.sg_list); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); done:; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index eabc4247860..cdf0e6abd34 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1852,7 +1852,7 @@ unsigned ipath_get_npkeys(struct ipath_devdata *dd) } /** - * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table + * ipath_get_pkey - return the indexed PKEY from the port PKEY table * @dd: the infinipath device * @index: the PKEY index */ @@ -1860,6 +1860,7 @@ unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) { unsigned ret; + /* always a kernel port, no locking needed */ if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) ret = 0; else diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 9d12ae8a778..11e3f613df9 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -431,6 +431,7 @@ struct ipath_qp { u32 s_lsn; /* limit sequence number (credit) */ struct ipath_swqe *s_wq; /* send work queue */ struct ipath_swqe *s_wqe; + struct ipath_sge *r_ud_sg_list; struct ipath_rq r_rq; /* receive work queue */ struct ipath_sge r_sg_list[0]; /* verified SGEs */ }; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 18308494a19..8415ecce5c4 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -222,7 +222,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector } err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, 0); + cq->db.dma, &cq->mcq, vector, 0); if (err) goto err_dbmap; @@ -325,15 +325,17 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) { - struct mlx4_cqe *cqe; + struct mlx4_cqe *cqe, *new_cqe; int i; i = cq->mcq.cons_index; cqe = get_cqe(cq, i & cq->ibcq.cqe); while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) { - memcpy(get_cqe_from_buf(&cq->resize_buf->buf, - (i + 1) & cq->resize_buf->cqe), - get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe)); + new_cqe = get_cqe_from_buf(&cq->resize_buf->buf, + (i + 1) & cq->resize_buf->cqe); + memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe)); + new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) | + (((i + 1) & (cq->resize_buf->cqe + 1)) ? 
MLX4_CQE_OWNER_MASK : 0); cqe = get_cqe(cq, ++i & cq->ibcq.cqe); } ++cq->mcq.cons_index; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2e80f8f47b0..dcefe1fceb5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -578,7 +578,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ibdev->num_ports++; ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; - ibdev->ib_dev.num_comp_vectors = 1; + ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; ibdev->ib_dev.dma_device = &dev->pdev->dev; ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 1595dc7bba9..13a5bb1a7bc 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -137,14 +137,18 @@ #ifdef CONFIG_INFINIBAND_NES_DEBUG #define nes_debug(level, fmt, args...) \ +do { \ if (level & nes_debug_level) \ - printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args) - -#define assert(expr) \ -if (!(expr)) { \ - printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ - #expr, __FILE__, __func__, __LINE__); \ -} + printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \ +} while (0) + +#define assert(expr) \ +do { \ + if (!(expr)) { \ + printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } \ +} while (0) #define NES_EVENT_TIMEOUT 1200000 #else diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 2854a6f7fdf..a812db24347 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -86,15 +86,14 @@ static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); -static void mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, +static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); static int mini_cm_dealloc_core(struct nes_cm_core *); static int mini_cm_get(struct nes_cm_core *); static int mini_cm_set(struct nes_cm_core *, u32, u32); -static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *, +static void form_cm_frame(struct sk_buff *, struct nes_cm_node *, void *, u32, void *, u32, u8); -static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node); static int add_ref_cm_node(struct nes_cm_node *); static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); @@ -251,7 +250,7 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) * form_cm_frame - get a free packet and build empty frame Use * node info to build. 
*/ -static struct sk_buff *form_cm_frame(struct sk_buff *skb, +static void form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node, void *options, u32 optionsize, void *data, u32 datasize, u8 flags) { @@ -339,7 +338,6 @@ static struct sk_buff *form_cm_frame(struct sk_buff *skb, skb_shinfo(skb)->nr_frags = 0; cm_packets_created++; - return skb; } @@ -356,7 +354,6 @@ static void print_core(struct nes_cm_core *core) nes_debug(NES_DBG_CM, "State : %u \n", core->state); - nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list)); nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); @@ -381,8 +378,6 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, int ret = 0; u32 was_timer_set; - if (!cm_node) - return -EINVAL; new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) return -1; @@ -459,13 +454,23 @@ static void nes_cm_timer_tick(unsigned long pass) int ret = NETDEV_TX_OK; enum nes_cm_node_state last_state; + struct list_head timer_list; + INIT_LIST_HEAD(&timer_list); spin_lock_irqsave(&cm_core->ht_lock, flags); list_for_each_safe(list_node, list_core_temp, - &cm_core->connected_nodes) { + &cm_core->connected_nodes) { cm_node = container_of(list_node, struct nes_cm_node, list); - add_ref_cm_node(cm_node); - spin_unlock_irqrestore(&cm_core->ht_lock, flags); + if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) { + add_ref_cm_node(cm_node); + list_add(&cm_node->timer_entry, &timer_list); + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + list_for_each_safe(list_node, list_core_temp, &timer_list) { + cm_node = container_of(list_node, struct nes_cm_node, + timer_entry); spin_lock_irqsave(&cm_node->recv_list_lock, flags); list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) { @@ -519,7 +524,7 @@ static void nes_cm_timer_tick(unsigned long pass) do { send_entry = cm_node->send_entry; if (!send_entry) - continue; + break; if (time_after(send_entry->timetosend, jiffies)) { if (cm_node->state != NES_CM_STATE_TSA) { if ((nexttimeout > @@ -528,18 +533,18 @@ static void nes_cm_timer_tick(unsigned long pass) nexttimeout = send_entry->timetosend; settimer = 1; - continue; + break; } } else { free_retrans_entry(cm_node); - continue; + break; } } if ((cm_node->state == NES_CM_STATE_TSA) || (cm_node->state == NES_CM_STATE_CLOSED)) { free_retrans_entry(cm_node); - continue; + break; } if (!send_entry->retranscount || @@ -557,7 +562,7 @@ static void nes_cm_timer_tick(unsigned long pass) NES_CM_EVENT_ABORTED); spin_lock_irqsave(&cm_node->retrans_list_lock, flags); - continue; + break; } atomic_inc(&send_entry->skb->users); cm_packets_retrans++; @@ -583,7 +588,7 @@ static void nes_cm_timer_tick(unsigned long pass) send_entry->retrycount--; nexttimeout = jiffies + NES_SHORT_TIME; settimer = 1; - continue; + break; } else { cm_packets_sent++; } @@ -615,14 +620,12 @@ static void nes_cm_timer_tick(unsigned long pass) spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); rem_ref_cm_node(cm_node->cm_core, cm_node); - spin_lock_irqsave(&cm_core->ht_lock, flags); if (ret != NETDEV_TX_OK) { nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n", cm_node); break; } } - spin_unlock_irqrestore(&cm_core->ht_lock, flags); if (settimer) { if (!timer_pending(&cm_core->tcp_timer)) { @@ -683,7 +686,7 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack, optionssize += 1; if (!skb) - skb = 
get_free_pkt(cm_node); + skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); return -1; @@ -708,7 +711,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb) int flags = SET_RST | SET_ACK; if (!skb) - skb = get_free_pkt(cm_node); + skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); return -1; @@ -729,7 +732,7 @@ static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb) int ret; if (!skb) - skb = get_free_pkt(cm_node); + skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); @@ -752,7 +755,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) /* if we didn't get a frame get one */ if (!skb) - skb = get_free_pkt(cm_node); + skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); @@ -767,59 +770,15 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) /** - * get_free_pkt - */ -static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node) -{ - struct sk_buff *skb, *new_skb; - - /* check to see if we need to repopulate the free tx pkt queue */ - if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) { - while (skb_queue_len(&cm_node->cm_core->tx_free_list) < - cm_node->cm_core->free_tx_pkt_max) { - /* replace the frame we took, we won't get it back */ - new_skb = dev_alloc_skb(cm_node->cm_core->mtu); - BUG_ON(!new_skb); - /* add a replacement frame to the free tx list head */ - skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb); - } - } - - skb = skb_dequeue(&cm_node->cm_core->tx_free_list); - - return skb; -} - - -/** - * make_hashkey - generate hash key from node tuple - */ -static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port, - nes_addr_t rem_addr) -{ - u32 hashkey = 0; - - hashkey = loc_addr + rem_addr + loc_port + rem_port; - hashkey = (hashkey % NES_CM_HASHTABLE_SIZE); - - return hashkey; -} - - -/** * find_node - find a cm node that matches the reference cm node */ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) { unsigned long flags; - u32 hashkey; struct list_head *hte; struct nes_cm_node *cm_node; - /* make a hash index key for this packet */ - hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr); - /* get a handle on the hte */ hte = &cm_core->connected_nodes; @@ -887,7 +846,6 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) { unsigned long flags; - u32 hashkey; struct list_head *hte; if (!cm_node || !cm_core) @@ -896,11 +854,6 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n", cm_node); - /* first, make an index into our hash table */ - hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr, - cm_node->rem_port, cm_node->rem_addr); - cm_node->hashkey = hashkey; - spin_lock_irqsave(&cm_core->ht_lock, flags); /* get a handle on the hash table element (list head for this slot) */ @@ -925,28 +878,36 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, struct list_head *list_pos = NULL; struct list_head *list_temp = NULL; struct nes_cm_node *cm_node = NULL; + struct list_head reset_list; nes_debug(NES_DBG_CM, "attempting listener= %p 
free_nodes= %d, " "refcnt=%d\n", listener, free_hanging_nodes, atomic_read(&listener->ref_count)); /* free non-accelerated child nodes for this listener */ + INIT_LIST_HEAD(&reset_list); if (free_hanging_nodes) { spin_lock_irqsave(&cm_core->ht_lock, flags); list_for_each_safe(list_pos, list_temp, - &g_cm_core->connected_nodes) { + &g_cm_core->connected_nodes) { cm_node = container_of(list_pos, struct nes_cm_node, list); if ((cm_node->listener == listener) && - (!cm_node->accelerated)) { - cleanup_retrans_entry(cm_node); - spin_unlock_irqrestore(&cm_core->ht_lock, - flags); - send_reset(cm_node, NULL); - spin_lock_irqsave(&cm_core->ht_lock, flags); + (!cm_node->accelerated)) { + add_ref_cm_node(cm_node); + list_add(&cm_node->reset_entry, &reset_list); } } spin_unlock_irqrestore(&cm_core->ht_lock, flags); } + + list_for_each_safe(list_pos, list_temp, &reset_list) { + cm_node = container_of(list_pos, struct nes_cm_node, + reset_entry); + cleanup_retrans_entry(cm_node); + send_reset(cm_node, NULL); + rem_ref_cm_node(cm_node->cm_core, cm_node); + } + spin_lock_irqsave(&cm_core->listen_list_lock, flags); if (!atomic_dec_return(&listener->ref_count)) { list_del(&listener->list); @@ -1123,7 +1084,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, cm_node->loopbackpartner = NULL; /* get the mac addr for the remote node */ - arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); + if (ipv4_is_loopback(htonl(cm_node->rem_addr))) + arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); + else + arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); if (arpindex < 0) { arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr); if (arpindex < 0) { @@ -1303,7 +1267,6 @@ static void drop_packet(struct sk_buff *skb) static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, struct tcphdr *tcph) { - atomic_inc(&cm_resets_recvd); nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " "refcnt=%d\n", cm_node, cm_node->state, atomic_read(&cm_node->ref_count)); @@ -1341,6 +1304,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, { int reset = 0; /* whether to send reset in case of err.. */ + int passive_state; atomic_inc(&cm_resets_recvd); nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
" refcnt=%d\n", cm_node, cm_node->state, @@ -1354,7 +1318,14 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cm_node->listener, cm_node->state); active_open_err(cm_node, skb, reset); break; - /* For PASSIVE open states, remove the cm_node event */ + case NES_CM_STATE_MPAREQ_RCVD: + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == NES_SEND_RESET_EVENT) + create_event(cm_node, NES_CM_EVENT_RESET); + cleanup_retrans_entry(cm_node); + cm_node->state = NES_CM_STATE_CLOSED; + dev_kfree_skb_any(skb); + break; case NES_CM_STATE_ESTABLISHED: case NES_CM_STATE_SYN_RCVD: case NES_CM_STATE_LISTENING: @@ -1362,7 +1333,14 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, passive_open_err(cm_node, skb, reset); break; case NES_CM_STATE_TSA: + active_open_err(cm_node, skb, reset); + break; + case NES_CM_STATE_CLOSED: + cleanup_retrans_entry(cm_node); + drop_packet(skb); + break; default: + drop_packet(skb); break; } } @@ -1391,6 +1369,9 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb, dev_kfree_skb_any(skb); if (type == NES_CM_EVENT_CONNECTED) cm_node->state = NES_CM_STATE_TSA; + else + atomic_set(&cm_node->passive_state, + NES_PASSIVE_STATE_INDICATED); create_event(cm_node, type); } @@ -1471,7 +1452,7 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, int optionsize; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); - skb_pull(skb, tcph->doff << 2); + skb_trim(skb, 0); inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { @@ -1504,6 +1485,10 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cm_node->state = NES_CM_STATE_SYN_RCVD; send_syn(cm_node, 1, skb); break; + case NES_CM_STATE_CLOSED: + cleanup_retrans_entry(cm_node); + send_reset(cm_node, skb); + break; case NES_CM_STATE_TSA: case NES_CM_STATE_ESTABLISHED: case NES_CM_STATE_FIN_WAIT1: @@ -1512,7 +1497,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, case NES_CM_STATE_LAST_ACK: case NES_CM_STATE_CLOSING: case NES_CM_STATE_UNKNOWN: - case NES_CM_STATE_CLOSED: default: drop_packet(skb); break; @@ -1528,7 +1512,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, int optionsize; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); - skb_pull(skb, tcph->doff << 2); + skb_trim(skb, 0); inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { case NES_CM_STATE_SYN_SENT: @@ -1552,6 +1536,12 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, /* passive open, so should not be here */ passive_open_err(cm_node, skb, 1); break; + case NES_CM_STATE_LISTENING: + case NES_CM_STATE_CLOSED: + cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + cleanup_retrans_entry(cm_node); + send_reset(cm_node, skb); + break; case NES_CM_STATE_ESTABLISHED: case NES_CM_STATE_FIN_WAIT1: case NES_CM_STATE_FIN_WAIT2: @@ -1559,7 +1549,6 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, case NES_CM_STATE_TSA: case NES_CM_STATE_CLOSING: case NES_CM_STATE_UNKNOWN: - case NES_CM_STATE_CLOSED: case NES_CM_STATE_MPAREQ_SENT: default: drop_packet(skb); @@ -1574,6 +1563,13 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, u32 inc_sequence; u32 rem_seq_ack; u32 rem_seq; + int ret; + int optionsize; + u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + 
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + if (check_seq(cm_node, tcph, skb)) return; @@ -1586,7 +1582,18 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, switch (cm_node->state) { case NES_CM_STATE_SYN_RCVD: /* Passive OPEN */ + ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 1); + if (ret) + break; cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + cm_node->tcp_cntxt.loc_seq_num = temp_seq; + if (cm_node->tcp_cntxt.rem_ack_num != + cm_node->tcp_cntxt.loc_seq_num) { + nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n"); + cleanup_retrans_entry(cm_node); + send_reset(cm_node, skb); + return; + } cm_node->state = NES_CM_STATE_ESTABLISHED; if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; @@ -1618,11 +1625,15 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, dev_kfree_skb_any(skb); } break; + case NES_CM_STATE_LISTENING: + case NES_CM_STATE_CLOSED: + cleanup_retrans_entry(cm_node); + send_reset(cm_node, skb); + break; case NES_CM_STATE_FIN_WAIT1: case NES_CM_STATE_SYN_SENT: case NES_CM_STATE_FIN_WAIT2: case NES_CM_STATE_TSA: - case NES_CM_STATE_CLOSED: case NES_CM_STATE_MPAREQ_RCVD: case NES_CM_STATE_LAST_ACK: case NES_CM_STATE_CLOSING: @@ -1645,9 +1656,9 @@ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node); if (passive) - passive_open_err(cm_node, skb, 0); + passive_open_err(cm_node, skb, 1); else - active_open_err(cm_node, skb, 0); + active_open_err(cm_node, skb, 1); return 1; } } @@ -1967,6 +1978,7 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) { int ret = 0; + int passive_state; nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); @@ -1974,9 +1986,13 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, if (cm_node->tcp_cntxt.client) return ret; cleanup_retrans_entry(cm_node); - cm_node->state = NES_CM_STATE_CLOSED; - ret = send_reset(cm_node, NULL); + passive_state = atomic_add_return(1, &cm_node->passive_state); + cm_node->state = NES_CM_STATE_CLOSED; + if (passive_state == NES_SEND_RESET_EVENT) + rem_ref_cm_node(cm_core, cm_node); + else + ret = send_reset(cm_node, NULL); return ret; } @@ -2034,7 +2050,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod * recv_pkt - recv an ETHERNET packet, and process it through CM * node state machine */ -static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, +static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic, struct sk_buff *skb) { struct nes_cm_node *cm_node = NULL; @@ -2042,23 +2058,16 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct iphdr *iph; struct tcphdr *tcph; struct nes_cm_info nfo; + int skb_handled = 1; if (!skb) - return; + return 0; if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { - dev_kfree_skb_any(skb); - return; + return 0; } iph = (struct iphdr *)skb->data; tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); - skb_reset_network_header(skb); - skb_set_transport_header(skb, sizeof(*tcph)); - if (!tcph) { - dev_kfree_skb_any(skb); - return; - } - skb->len = ntohs(iph->tot_len); nfo.loc_addr = ntohl(iph->daddr); nfo.loc_port = ntohs(tcph->dest); @@ -2077,23 +2086,21 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, /* Only type of packet accepted are for */ /* 
the PASSIVE open (syn only) */ if ((!tcph->syn) || (tcph->ack)) { - cm_packets_dropped++; + skb_handled = 0; break; } listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port, NES_CM_LISTENER_ACTIVE_STATE); - if (listener) { - nfo.cm_id = listener->cm_id; - nfo.conn_type = listener->conn_type; - } else { - nes_debug(NES_DBG_CM, "Unable to find listener " - "for the pkt\n"); - cm_packets_dropped++; - dev_kfree_skb_any(skb); + if (!listener) { + nfo.cm_id = NULL; + nfo.conn_type = 0; + nes_debug(NES_DBG_CM, "Unable to find listener for the pkt\n"); + skb_handled = 0; break; } - + nfo.cm_id = listener->cm_id; + nfo.conn_type = listener->conn_type; cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener); if (!cm_node) { @@ -2119,9 +2126,13 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, dev_kfree_skb_any(skb); break; } + skb_reset_network_header(skb); + skb_set_transport_header(skb, sizeof(*tcph)); + skb->len = ntohs(iph->tot_len); process_packet(cm_node, skb, cm_core); rem_ref_cm_node(cm_core, cm_node); } while (0); + return skb_handled; } @@ -2130,10 +2141,7 @@ static void mini_cm_recv_pkt(struct nes_cm_core *cm_core, */ static struct nes_cm_core *nes_cm_alloc_core(void) { - int i; - struct nes_cm_core *cm_core; - struct sk_buff *skb = NULL; /* setup the CM core */ /* alloc top level core control structure */ @@ -2151,19 +2159,6 @@ static struct nes_cm_core *nes_cm_alloc_core(void) atomic_set(&cm_core->events_posted, 0); - /* init the packet lists */ - skb_queue_head_init(&cm_core->tx_free_list); - - for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) { - skb = dev_alloc_skb(cm_core->mtu); - if (!skb) { - kfree(cm_core); - return NULL; - } - /* add 'raw' skb to free frame list */ - skb_queue_head(&cm_core->tx_free_list, skb); - } - cm_core->api = &nes_cm_api; spin_lock_init(&cm_core->ht_lock); @@ -2392,7 +2387,6 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) atomic_inc(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { - issued_disconnect_reset = 1; cm_event.status = IW_CM_EVENT_STATUS_RESET; nes_debug(NES_DBG_CM, "Generating a CM " "Disconnect Event (status reset) for " @@ -2542,6 +2536,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct nes_v4_quad nes_quad; u32 crc_value; int ret; + int passive_state; ibqp = nes_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -2709,8 +2704,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); - attr.qp_state = IB_QPS_RTS; - nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); /* notify OF layer that accept event was successfull */ cm_id->add_ref(cm_id); @@ -2723,6 +2716,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_event.private_data = NULL; cm_event.private_data_len = 0; ret = cm_id->event_handler(cm_id, &cm_event); + attr.qp_state = IB_QPS_RTS; + nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); if (cm_node->loopbackpartner) { cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len; @@ -2735,6 +2730,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " "ret=%d\n", __func__, __LINE__, ret); + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == NES_SEND_RESET_EVENT) + create_event(cm_node, NES_CM_EVENT_RESET); return 0; } @@ -2938,15 +2936,16 @@ int 
nes_destroy_listen(struct iw_cm_id *cm_id) */ int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) { + int rc = 0; cm_packets_received++; if ((g_cm_core) && (g_cm_core->api)) { - g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); + rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); } else { nes_debug(NES_DBG_CM, "Unable to process packet for CM," " cm is not setup properly.\n"); } - return 0; + return rc; } @@ -3217,6 +3216,18 @@ static void cm_event_reset(struct nes_cm_event *event) cm_event.private_data_len = 0; ret = cm_id->event_handler(cm_id, &cm_event); + cm_id->add_ref(cm_id); + atomic_inc(&cm_closes); + cm_event.event = IW_CM_EVENT_CLOSE; + cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.provider_data = cm_id->provider_data; + cm_event.local_addr = cm_id->local_addr; + cm_event.remote_addr = cm_id->remote_addr; + cm_event.private_data = NULL; + cm_event.private_data_len = 0; + nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node); + ret = cm_id->event_handler(cm_id, &cm_event); + nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 367b3d29014..fafa35042eb 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -76,6 +76,10 @@ enum nes_timer_type { NES_TIMER_TYPE_CLOSE, }; +#define NES_PASSIVE_STATE_INDICATED 0 +#define NES_DO_NOT_SEND_RESET_EVENT 1 +#define NES_SEND_RESET_EVENT 2 + #define MAX_NES_IFS 4 #define SET_ACK 1 @@ -161,6 +165,8 @@ struct nes_timer_entry { #define NES_CM_DEF_SEQ2 0x18ed5740 #define NES_CM_DEF_LOCAL_ID2 0xb807 +#define MAX_CM_BUFFER 512 + typedef u32 nes_addr_t; @@ -254,8 +260,6 @@ struct nes_cm_listener { /* per connection node and node state information */ struct nes_cm_node { - u32 hashkey; - nes_addr_t loc_addr, rem_addr; u16 loc_port, rem_port; @@ -292,7 +296,10 @@ struct nes_cm_node { int apbvt_set; int accept_pend; int freed; + struct list_head timer_entry; + struct list_head reset_entry; struct nes_qp *nesqp; + atomic_t passive_state; }; /* structure for client or CM to fill when making CM api calls. 
*/ @@ -350,7 +357,6 @@ struct nes_cm_core { u32 mtu; u32 free_tx_pkt_max; u32 rx_pkt_posted; - struct sk_buff_head tx_free_list; atomic_t ht_node_cnt; struct list_head connected_nodes; /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */ @@ -390,7 +396,7 @@ struct nes_cm_ops { struct nes_cm_node *); int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); - void (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, + int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); int (*destroy_cm_core)(struct nes_cm_core *); int (*get)(struct nes_cm_core *); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 735c125b48a..5d139db1b77 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2700,27 +2700,33 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */ if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) { - nes_cm_recv(rx_skb, nesvnic->netdev); + if (nes_cm_recv(rx_skb, nesvnic->netdev)) + rx_skb = NULL; + } + if (rx_skb == NULL) + goto skip_rx_indicate0; + + + if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && + (nesvnic->vlan_grp != NULL)) { + vlan_tag = (u16)(le32_to_cpu( + cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) + >> 16); + nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", + nesvnic->netdev->name, vlan_tag); + if (nes_use_lro) + lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb, + nesvnic->vlan_grp, vlan_tag, NULL); + else + nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); } else { - if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) { - vlan_tag = (u16)(le32_to_cpu( - cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) - >> 16); - nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. 
Tag = 0x%04X\n", - nesvnic->netdev->name, vlan_tag); - if (nes_use_lro) - lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb, - nesvnic->vlan_grp, vlan_tag, NULL); - else - nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); - } else { - if (nes_use_lro) - lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); - else - nes_netif_rx(rx_skb); - } + if (nes_use_lro) + lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); + else + nes_netif_rx(rx_skb); } +skip_rx_indicate0: nesvnic->netdev->last_rx = jiffies; /* nesvnic->netstats.rx_packets++; */ /* nesvnic->netstats.rx_bytes += rx_pkt_size; */ diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index 9f16f7a89ef..aa9b7348c72 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c @@ -540,11 +540,14 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) if (!list_empty(&nesdev->cqp_avail_reqs)) { spin_lock_irqsave(&nesdev->cqp.lock, flags); - cqp_request = list_entry(nesdev->cqp_avail_reqs.next, + if (!list_empty(&nesdev->cqp_avail_reqs)) { + cqp_request = list_entry(nesdev->cqp_avail_reqs.next, struct nes_cqp_request, list); - list_del_init(&cqp_request->list); + list_del_init(&cqp_request->list); + } spin_unlock_irqrestore(&nesdev->cqp.lock, flags); - } else { + } + if (cqp_request == NULL) { cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); if (cqp_request) { cqp_request->dynamic = 1; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index d36c9a0bf1b..4fdb72454f9 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1695,13 +1695,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, /* use 4k pbl */ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries); if (nesadapter->free_4kpbl == 0) { - if (cqp_request->dynamic) { - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - kfree(cqp_request); - } else { - list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + nes_free_cqp_request(nesdev, cqp_request); if (!context) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); @@ -1717,13 +1712,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, /* use 256 byte pbl */ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries); if (nesadapter->free_256pbl == 0) { - if (cqp_request->dynamic) { - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - kfree(cqp_request); - } else { - list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + nes_free_cqp_request(nesdev, cqp_request); if (!context) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); @@ -1928,13 +1918,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, /* Two level PBL */ if ((pbl_count+1) > nesadapter->free_4kpbl) { nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); - if (cqp_request->dynamic) { - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - kfree(cqp_request); - } else { - list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + 
nes_free_cqp_request(nesdev, cqp_request); return -ENOMEM; } else { nesadapter->free_4kpbl -= pbl_count+1; @@ -1942,13 +1927,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, } else if (residual_page_count > 32) { if (pbl_count > nesadapter->free_4kpbl) { nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); - if (cqp_request->dynamic) { - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - kfree(cqp_request); - } else { - list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + nes_free_cqp_request(nesdev, cqp_request); return -ENOMEM; } else { nesadapter->free_4kpbl -= pbl_count; @@ -1956,13 +1936,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, } else { if (pbl_count > nesadapter->free_256pbl) { nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); - if (cqp_request->dynamic) { - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - kfree(cqp_request); - } else { - list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + nes_free_cqp_request(nesdev, cqp_request); return -ENOMEM; } else { nesadapter->free_256pbl -= pbl_count; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 81a82628a5f..861119593f2 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -252,6 +252,9 @@ struct iser_conn { wait_queue_head_t wait; /* waitq for conn/disconn */ atomic_t post_recv_buf_count; /* posted rx count */ atomic_t post_send_buf_count; /* posted tx count */ + atomic_t unexpected_pdu_count;/* count of received * + * unexpected pdus * + * not yet retired */ char name[ISER_OBJECT_NAME_SIZE]; struct iser_page_vec *page_vec; /* represents SG to fmr maps* * maps serialized as tx is*/ diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index cdd28318904..ed1aff21b7e 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -183,14 +183,8 @@ static int iser_post_receive_control(struct iscsi_conn *conn) struct iser_regd_buf *regd_data; struct iser_dto *recv_dto = NULL; struct iser_device *device = iser_conn->ib_conn->device; - int rx_data_size, err = 0; - - rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); - if (rx_desc == NULL) { - iser_err("Failed to alloc desc for post recv\n"); - return -ENOMEM; - } - rx_desc->type = ISCSI_RX; + int rx_data_size, err; + int posts, outstanding_unexp_pdus; /* for the login sequence we must support rx of upto 8K; login is done * after conn create/bind (connect) and conn stop/bind (reconnect), @@ -201,46 +195,80 @@ static int iser_post_receive_control(struct iscsi_conn *conn) else /* FIXME till user space sets conn->max_recv_dlength correctly */ rx_data_size = 128; - rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); - if (rx_desc->data == NULL) { - iser_err("Failed to alloc data buf for post recv\n"); - err = -ENOMEM; - goto post_rx_kmalloc_failure; - } + outstanding_unexp_pdus = + atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0); - recv_dto = &rx_desc->dto; - recv_dto->ib_conn = iser_conn->ib_conn; - recv_dto->regd_vector_len = 0; + /* + * in addition to the response buffer, replace those consumed by + * unexpected pdus. 
+ */ + for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) { + rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); + if (rx_desc == NULL) { + iser_err("Failed to alloc desc for post recv %d\n", + posts); + err = -ENOMEM; + goto post_rx_cache_alloc_failure; + } + rx_desc->type = ISCSI_RX; + rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); + if (rx_desc->data == NULL) { + iser_err("Failed to alloc data buf for post recv %d\n", + posts); + err = -ENOMEM; + goto post_rx_kmalloc_failure; + } - regd_hdr = &rx_desc->hdr_regd_buf; - memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); - regd_hdr->device = device; - regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ - regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; + recv_dto = &rx_desc->dto; + recv_dto->ib_conn = iser_conn->ib_conn; + recv_dto->regd_vector_len = 0; - iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); + regd_hdr = &rx_desc->hdr_regd_buf; + memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); + regd_hdr->device = device; + regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ + regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; - iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); + iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); - regd_data = &rx_desc->data_regd_buf; - memset(regd_data, 0, sizeof(struct iser_regd_buf)); - regd_data->device = device; - regd_data->virt_addr = rx_desc->data; - regd_data->data_size = rx_data_size; + iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); - iser_reg_single(device, regd_data, DMA_FROM_DEVICE); + regd_data = &rx_desc->data_regd_buf; + memset(regd_data, 0, sizeof(struct iser_regd_buf)); + regd_data->device = device; + regd_data->virt_addr = rx_desc->data; + regd_data->data_size = rx_data_size; - iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); + iser_reg_single(device, regd_data, DMA_FROM_DEVICE); - err = iser_post_recv(rx_desc); - if (!err) - return 0; + iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); - /* iser_post_recv failed */ + err = iser_post_recv(rx_desc); + if (err) { + iser_err("Failed iser_post_recv for post %d\n", posts); + goto post_rx_post_recv_failure; + } + } + /* all posts successful */ + return 0; + +post_rx_post_recv_failure: iser_dto_buffs_release(recv_dto); kfree(rx_desc->data); post_rx_kmalloc_failure: kmem_cache_free(ig.desc_cache, rx_desc); +post_rx_cache_alloc_failure: + if (posts > 0) { + /* + * response buffer posted, but did not replace all unexpected + * pdu recv bufs. 
Ignore error, retry occurs next send + */ + outstanding_unexp_pdus -= (posts - 1); + err = 0; + } + atomic_add(outstanding_unexp_pdus, + &iser_conn->ib_conn->unexpected_pdu_count); + return err; } @@ -274,8 +302,10 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) struct iscsi_iser_conn *iser_conn = conn->dd_data; int i; - /* no need to keep it in a var, we are after login so if this should - * be negotiated, by now the result should be available here */ + /* + * FIXME this value should be declared to the target during login with + * the MaxOutstandingUnexpectedPDUs key when supported + */ int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS; iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num); @@ -478,6 +508,7 @@ int iser_send_control(struct iscsi_conn *conn, int err = 0; struct iser_regd_buf *regd_buf; struct iser_device *device; + unsigned char opcode; if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); @@ -512,10 +543,15 @@ int iser_send_control(struct iscsi_conn *conn, data_seg_len); } - if (iser_post_receive_control(conn) != 0) { - iser_err("post_rcv_buff failed!\n"); - err = -ENOMEM; - goto send_control_error; + opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; + + /* post recv buffer for response if one is expected */ + if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) { + if (iser_post_receive_control(conn) != 0) { + iser_err("post_rcv_buff failed!\n"); + err = -ENOMEM; + goto send_control_error; + } } err = iser_post_send(mdesc); @@ -586,6 +622,20 @@ void iser_rcv_completion(struct iser_desc *rx_desc, * parallel to the execution of iser_conn_term. So the code that waits * * for the posted rx bufs refcount to become zero handles everything */ atomic_dec(&conn->ib_conn->post_recv_buf_count); + + /* + * if an unexpected PDU was received then the recv wr consumed must + * be replaced, this is done in the next send of a control-type PDU + */ + if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { + /* nop-in with itt = 0xffffffff */ + atomic_inc(&conn->ib_conn->unexpected_pdu_count); + } + else if (opcode == ISCSI_OP_ASYNC_EVENT) { + /* asyncronous message */ + atomic_inc(&conn->ib_conn->unexpected_pdu_count); + } + /* a reject PDU consumes the recv buf posted for the response */ } void iser_snd_completion(struct iser_desc *tx_desc) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index e418b960e33..319b188145b 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -498,6 +498,7 @@ void iser_conn_init(struct iser_conn *ib_conn) init_waitqueue_head(&ib_conn->wait); atomic_set(&ib_conn->post_recv_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0); + atomic_set(&ib_conn->unexpected_pdu_count, 0); atomic_set(&ib_conn->refcount, 1); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index c600ab7f93e..5c8a1bcf7ca 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -18,6 +18,7 @@ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input.h> +#include <linux/clk.h> #include <linux/io.h> #include <asm/sh_keysc.h> @@ -39,6 +40,7 @@ static const struct { struct sh_keysc_priv { void __iomem *iomem_base; + struct clk *clk; unsigned long last_keys; struct input_dev *input; struct sh_keysc_info pdata; 
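The probe and remove hunks that follow wire the KEYSC block into the SH clock framework through the new priv->clk member added above. A minimal sketch of that clock lifecycle, assuming only the generic <linux/clk.h> API; the example_* helpers and their exact error handling are illustrative, not functions taken from the driver:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* sketch only: mirrors the clk handling pattern in the hunks below */
static struct clk *example_keysc_clk_get(struct platform_device *pdev)
{
	char clk_name[8];
	struct clk *clk;

	/* the driver derives the clock name from the platform device id */
	snprintf(clk_name, sizeof(clk_name), "keysc%d", pdev->id);

	clk = clk_get(&pdev->dev, clk_name);	/* look up the module clock */
	if (IS_ERR(clk))
		return clk;			/* caller checks with IS_ERR()/PTR_ERR() */

	clk_enable(clk);	/* ungate the clock before touching registers */
	return clk;
}

static void example_keysc_clk_put(struct clk *clk)
{
	clk_disable(clk);	/* gate the clock off when the device goes away */
	clk_put(clk);		/* drop the reference taken by clk_get() */
}

Because clk_get() now sits between the ioremap and the input device allocation, the later failure paths have to release the clock reference as well, which is why the error labels in the probe hunk are renumbered.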
@@ -125,6 +127,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) struct sh_keysc_info *pdata; struct resource *res; struct input_dev *input; + char clk_name[8]; int i, k; int irq, error; @@ -165,11 +168,19 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) goto err1; } + snprintf(clk_name, sizeof(clk_name), "keysc%d", pdev->id); + priv->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + error = PTR_ERR(priv->clk); + goto err2; + } + priv->input = input_allocate_device(); if (!priv->input) { dev_err(&pdev->dev, "failed to allocate input device\n"); error = -ENOMEM; - goto err2; + goto err3; } input = priv->input; @@ -187,7 +198,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) error = request_irq(irq, sh_keysc_isr, 0, pdev->name, pdev); if (error) { dev_err(&pdev->dev, "failed to request IRQ\n"); - goto err3; + goto err4; } for (i = 0; i < SH_KEYSC_MAXKEYS; i++) { @@ -199,18 +210,22 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) error = input_register_device(input); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); - goto err4; + goto err5; } + clk_enable(priv->clk); + iowrite16((sh_keysc_mode[pdata->mode].kymd << 8) | pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); return 0; - err4: + err5: free_irq(irq, pdev); - err3: + err4: input_free_device(input); + err3: + clk_put(priv->clk); err2: iounmap(priv->iomem_base); err1: @@ -230,6 +245,9 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev) free_irq(platform_get_irq(pdev, 0), pdev); iounmap(priv->iomem_base); + clk_disable(priv->clk); + clk_put(priv->clk); + platform_set_drvdata(pdev, NULL); kfree(priv); return 0; diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c index 550e80f390a..0aa66ec4cbd 100644 --- a/drivers/isdn/capi/capifs.c +++ b/drivers/isdn/capi/capifs.c @@ -156,8 +156,8 @@ void capifs_new_ncci(unsigned int number, dev_t device) if (!inode) return; inode->i_ino = number+2; - inode->i_uid = config.setuid ? config.uid : current->fsuid; - inode->i_gid = config.setgid ? config.gid : current->fsgid; + inode->i_uid = config.setuid ? config.uid : current_fsuid(); + inode->i_gid = config.setgid ? 
config.gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|config.mode, device); //inode->i_op = &capifs_file_inode_operations; diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 484299b031f..8f9f4912de3 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -246,7 +246,8 @@ hysdn_conf_open(struct inode *ino, struct file *filep) } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x", - filep->f_uid, filep->f_gid, filep->f_mode); + filep->f_cred->fsuid, filep->f_cred->fsgid, + filep->f_mode); if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { /* write only access -> write boot file or conf line */ @@ -331,7 +332,8 @@ hysdn_conf_close(struct inode *ino, struct file *filep) } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x", - filep->f_uid, filep->f_gid, filep->f_mode); + filep->f_cred->fsuid, filep->f_cred->fsgid, + filep->f_mode); if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { /* write only access -> write boot file or conf line */ diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ac89a5deaca..ab7c8e4a61f 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) */ /* IO operations when bitmap is stored near all superblocks */ -static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index) +static struct page *read_sb_page(mddev_t *mddev, long offset, + struct page *page, + unsigned long index, int size) { /* choose a good rdev and read the page from there */ mdk_rdev_t *rdev; struct list_head *tmp; - struct page *page = alloc_page(GFP_KERNEL); sector_t target; if (!page) + page = alloc_page(GFP_KERNEL); + if (!page) return ERR_PTR(-ENOMEM); rdev_for_each(rdev, tmp, mddev) { @@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde target = rdev->sb_start + offset + index * (PAGE_SIZE/512); - if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) { + if (sync_page_io(rdev->bdev, target, + roundup(size, bdev_hardsect_size(rdev->bdev)), + page, READ)) { page->index = index; attach_page_buffers(page, NULL); /* so that free_buffer will * quietly no-op */ @@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap) bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); } else { - bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0); + bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + 0, sizeof(bitmap_super_t)); } if (IS_ERR(bitmap->sb_page)) { err = PTR_ERR(bitmap->sb_page); @@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) */ page = bitmap->sb_page; offset = sizeof(bitmap_super_t); + read_sb_page(bitmap->mddev, bitmap->offset, + page, + index, count); } else if (file) { page = read_page(file, index, bitmap, count); offset = 0; } else { - page = read_sb_page(bitmap->mddev, bitmap->offset, index); + page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + index, count); offset = 0; } if (IS_ERR(page)) { /* read error */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c99e4728ff4..343094c3fee 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -21,6 +21,7 @@ #include <linux/idr.h> #include <linux/hdreg.h> 
#include <linux/blktrace_api.h> +#include <trace/block.h> #define DM_MSG_PREFIX "core" @@ -51,6 +52,8 @@ struct dm_target_io { union map_info info; }; +DEFINE_TRACE(block_bio_complete); + union map_info *dm_get_mapinfo(struct bio *bio) { if (bio && bio->bi_private) @@ -504,8 +507,7 @@ static void dec_pending(struct dm_io *io, int error) end_io_acct(io); if (io->error != DM_ENDIO_REQUEUE) { - blk_add_trace_bio(io->md->queue, io->bio, - BLK_TA_COMPLETE); + trace_block_bio_complete(io->md->queue, io->bio); bio_endio(io->bio, io->error); } @@ -598,7 +600,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ - blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, + trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, tio->io->bio->bi_bdev->bd_dev, clone->bi_sector, sector); diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig index 73dc2ee9b01..b34301d56cd 100644 --- a/drivers/media/dvb/b2c2/Kconfig +++ b/drivers/media/dvb/b2c2/Kconfig @@ -9,11 +9,11 @@ config DVB_B2C2_FLEXCOP select DVB_STV0297 if !DVB_FE_CUSTOMISE select DVB_BCM3510 if !DVB_FE_CUSTOMISE select DVB_LGDT330X if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE select DVB_S5H1420 if !DVB_FE_CUSTOMISE select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE select DVB_ISL6421 if !DVB_FE_CUSTOMISE select DVB_CX24123 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE help Support for the digital TV receiver chip made by B2C2 Inc. included in Technisats PCI cards and USB boxes. diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index a127a4175c4..5cded370854 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c @@ -628,12 +628,14 @@ int flexcop_frontend_init(struct flexcop_device *fc) } /* try the cable dvb (stv0297) */ + fc->fc_i2c_adap[0].no_base_addr = 1; fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); if (fc->fe != NULL) { fc->dev_type = FC_CABLE; fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; goto fe_found; } + fc->fc_i2c_adap[0].no_base_addr = 0; /* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */ fc->fe = dvb_attach(mt312_attach, diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c index 43a112ec6d4..f13783f08f0 100644 --- a/drivers/media/dvb/b2c2/flexcop-i2c.c +++ b/drivers/media/dvb/b2c2/flexcop-i2c.c @@ -47,9 +47,13 @@ static int flexcop_i2c_read4(struct flexcop_i2c_adapter *i2c, int len = r100.tw_sm_c_100.total_bytes, /* remember total_bytes is buflen-1 */ ret; - r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; ret = flexcop_i2c_operation(i2c->fc, &r100); if (ret != 0) { + deb_i2c("Retrying operation\n"); + r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; + ret = flexcop_i2c_operation(i2c->fc, &r100); + } + if (ret != 0) { deb_i2c("read failed. 
%d\n", ret); return ret; } diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig index 7e9c090fc04..27edb0ece58 100644 --- a/drivers/media/dvb/bt8xx/Kconfig +++ b/drivers/media/dvb/bt8xx/Kconfig @@ -8,7 +8,7 @@ config DVB_BT8XX select DVB_OR51211 if !DVB_FE_CUSTOMISE select DVB_LGDT330X if !DVB_FE_CUSTOMISE select DVB_ZL10353 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE help Support for PCI cards based on the Bt8xx PCI bridge. Examples are the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards, diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 62b68c291d9..49f7b20c25d 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig @@ -24,8 +24,8 @@ config DVB_USB_A800 tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)" depends on DVB_USB select DVB_DIB3000MC - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE select DVB_PLL if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver. @@ -34,7 +34,7 @@ config DVB_USB_DIBUSB_MB depends on DVB_USB select DVB_PLL if !DVB_FE_CUSTOMISE select DVB_DIB3000MB - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE help Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator. @@ -55,7 +55,7 @@ config DVB_USB_DIBUSB_MC tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)" depends on DVB_USB select DVB_DIB3000MC - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE help Support for USB2.0 DVB-T receivers based on reference designs made by DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator. @@ -73,11 +73,11 @@ config DVB_USB_DIB0700 select DVB_DIB7000M select DVB_DIB3000MC select DVB_S5H1411 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MT2266 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE select DVB_TUNER_DIB0070 + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE help Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The USB bridge is also present in devices having the DiB7700 DVB-T-USB @@ -95,7 +95,7 @@ config DVB_USB_UMT_010 depends on DVB_USB select DVB_PLL if !DVB_FE_CUSTOMISE select DVB_DIB3000MC - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver. 
@@ -107,11 +107,11 @@ config DVB_USB_CXUSB select DVB_LGDT330X if !DVB_FE_CUSTOMISE select DVB_MT352 if !DVB_FE_CUSTOMISE select DVB_ZL10353 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE select DVB_DIB7000P if !DVB_FE_CUSTOMISE select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the Conexant USB2.0 hybrid reference design. Currently, only DVB and ATSC modes are supported, analog mode @@ -124,9 +124,9 @@ config DVB_USB_M920X tristate "Uli m920x DVB-T USB2.0 support" depends on DVB_USB select DVB_MT352 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE select DVB_TDA1004X if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver. Currently, only devices with a product id of @@ -137,7 +137,7 @@ config DVB_USB_GL861 tristate "Genesys Logic GL861 USB2.0 support" depends on DVB_USB select DVB_ZL10353 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0 receiver with USB ID 0db0:5581. @@ -146,7 +146,7 @@ config DVB_USB_AU6610 tristate "Alcor Micro AU6610 USB2.0 support" depends on DVB_USB select DVB_ZL10353 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver. @@ -198,8 +198,8 @@ config DVB_USB_NOVA_T_USB2 tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support" depends on DVB_USB select DVB_DIB3000MC - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE select DVB_PLL if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver. @@ -235,8 +235,8 @@ config DVB_USB_OPERA1 config DVB_USB_AF9005 tristate "Afatech AF9005 DVB-T USB1.1 support" depends on DVB_USB && EXPERIMENTAL - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver and the TerraTec Cinergy T USB XE (Rev.1) @@ -284,7 +284,7 @@ config DVB_USB_DTV5100 tristate "AME DTV-5100 USB2.0 DVB-T support" depends on DVB_USB select DVB_ZL10353 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver. 
@@ -293,9 +293,9 @@ config DVB_USB_AF9015 depends on DVB_USB && EXPERIMENTAL select DVB_AF9013 select DVB_PLL if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE help Say Y here to support the Afatech AF9015 based DVB-T USB2.0 receiver diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index f28d3ae59e0..39173278891 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -446,13 +446,13 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) == NULL ? -ENODEV : 0; } -#define DEFAULT_RC_INTERVAL 150 +#define DEFAULT_RC_INTERVAL 50 static u8 rc_request[] = { REQUEST_POLL_RC, 0 }; /* Number of keypresses to ignore before start repeating */ -#define RC_REPEAT_DELAY 2 -#define RC_REPEAT_DELAY_V1_20 5 +#define RC_REPEAT_DELAY 6 +#define RC_REPEAT_DELAY_V1_20 10 diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig index 867027ceab3..401a04effc0 100644 --- a/drivers/media/dvb/ttpci/Kconfig +++ b/drivers/media/dvb/ttpci/Kconfig @@ -106,7 +106,7 @@ config DVB_BUDGET_CI select DVB_TDA1004X if !DVB_FE_CUSTOMISE select DVB_LNBP21 if !DVB_FE_CUSTOMISE select DVB_TDA10023 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE select VIDEO_IR help Support for simple SAA7146 based DVB cards diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 47102c2c825..057fd7e160c 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -759,7 +759,7 @@ config VIDEO_PXA27x config VIDEO_SH_MOBILE_CEU tristate "SuperH Mobile CEU Interface driver" - depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA + depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK select VIDEOBUF_DMA_CONTIG ---help--- This is a v4l2 driver for the SuperH Mobile CEU Interface diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c index e6ca4012b5f..0ea85a05e5c 100644 --- a/drivers/media/video/compat_ioctl32.c +++ b/drivers/media/video/compat_ioctl32.c @@ -831,7 +831,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOIOCTLCMD; - if (!file->f_op->ioctl) + if (!file->f_op->ioctl && !file->f_op->unlocked_ioctl) return ret; switch (cmd) { diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig index ef48565de7f..8940b5387de 100644 --- a/drivers/media/video/cx18/Kconfig +++ b/drivers/media/video/cx18/Kconfig @@ -9,7 +9,7 @@ config VIDEO_CX18 select VIDEO_CX2341X select VIDEO_CS5345 select DVB_S5H1409 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MXL5005S if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMIZE ---help--- This is a video4linux driver for Conexant cx23418 based PCI combo video recorder devices. 
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig index 8c1b7fa47a4..00f1e2e8889 100644 --- a/drivers/media/video/cx23885/Kconfig +++ b/drivers/media/video/cx23885/Kconfig @@ -11,16 +11,16 @@ config VIDEO_CX23885 select VIDEO_CX25840 select VIDEO_CX2341X select DVB_DIB7000P if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_MT2131 if !DVB_FE_CUSTOMISE select DVB_S5H1409 if !DVB_FE_CUSTOMISE select DVB_S5H1411 if !DVB_FE_CUSTOMISE select DVB_LGDT330X if !DVB_FE_CUSTOMISE select DVB_ZL10353 if !DVB_FE_CUSTOMISE + select DVB_TDA10048 if !DVB_FE_CUSTOMIZE + select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMIZE select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE - select DVB_TDA10048 if !DVB_FE_CUSTOMIZE ---help--- This is a video4linux driver for Conexant 23885 based TV cards. diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig index 0b9e5fac623..b0f837588e0 100644 --- a/drivers/media/video/cx88/Kconfig +++ b/drivers/media/video/cx88/Kconfig @@ -56,12 +56,12 @@ config VIDEO_CX88_DVB select DVB_NXT200X if !DVB_FE_CUSTOMISE select DVB_CX24123 if !DVB_FE_CUSTOMISE select DVB_ISL6421 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE select DVB_S5H1411 if !DVB_FE_CUSTOMISE select DVB_CX24116 if !DVB_FE_CUSTOMISE select DVB_STV0299 if !DVB_FE_CUSTOMISE select DVB_STV0288 if !DVB_FE_CUSTOMISE select DVB_STB6000 if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE ---help--- This adds support for DVB/ATSC cards based on the Conexant 2388x chip. diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 610f535a257..4ea1f1e0489 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c @@ -549,10 +549,11 @@ static int em28xx_config(struct em28xx *dev) static void em28xx_config_i2c(struct em28xx *dev) { struct v4l2_routing route; + int zero = 0; route.input = INPUT(dev->ctl_input)->vmux; route.output = 0; - em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, NULL); + em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, &zero); em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route); em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL); } diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 748a87e82e4..02a6e9ef033 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c @@ -1264,10 +1264,10 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv, struct gspca_dev *gspca_dev = priv; int ret; - if (mutex_lock_interruptible(&gspca_dev->usb_lock)) - return -ERESTARTSYS; if (!gspca_dev->sd_desc->set_jcomp) return -EINVAL; + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp); mutex_unlock(&gspca_dev->usb_lock); return ret; diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig index 19eb274c9cd..854c2a88535 100644 --- a/drivers/media/video/pvrusb2/Kconfig +++ b/drivers/media/video/pvrusb2/Kconfig @@ -42,7 +42,7 @@ config VIDEO_PVRUSB2_DVB select DVB_S5H1411 if !DVB_FE_CUSTOMISE select DVB_TDA10048 if !DVB_FE_CUSTOMIZE select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE select MEDIA_TUNER_TDA8290 if 
!DVB_FE_CUSTOMIZE ---help--- diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig index 7021bbf5897..fc2164e28e7 100644 --- a/drivers/media/video/saa7134/Kconfig +++ b/drivers/media/video/saa7134/Kconfig @@ -34,9 +34,9 @@ config VIDEO_SAA7134_DVB select DVB_NXT200X if !DVB_FE_CUSTOMISE select DVB_TDA10086 if !DVB_FE_CUSTOMISE select DVB_TDA826X if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE select DVB_ISL6421 if !DVB_FE_CUSTOMISE - select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE + select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE + select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE ---help--- This adds support for DVB cards based on the Philips saa7134 chip. diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 2407607f2ef..536b1a9b310 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -31,6 +31,7 @@ #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/videodev2.h> +#include <linux/clk.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> @@ -89,6 +90,7 @@ struct sh_mobile_ceu_dev { unsigned int irq; void __iomem *base; + struct clk *clk; unsigned long video_limit; /* lock used to protect videobuf */ @@ -309,6 +311,8 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) if (ret) goto err; + clk_enable(pcdev->clk); + ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ while (ceu_read(pcdev, CSTSR) & 1) msleep(1); @@ -342,6 +346,8 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) } spin_unlock_irqrestore(&pcdev->lock, flags); + clk_disable(pcdev->clk); + icd->ops->release(icd); dev_info(&icd->dev, @@ -550,6 +556,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; + char clk_name[8]; unsigned int irq; int err = 0; @@ -615,6 +622,14 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) goto exit_release_mem; } + snprintf(clk_name, sizeof(clk_name), "ceu%d", pdev->id); + pcdev->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(pcdev->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + err = PTR_ERR(pcdev->clk); + goto exit_free_irq; + } + pcdev->ici.priv = pcdev; pcdev->ici.dev.parent = &pdev->dev; pcdev->ici.nr = pdev->id; @@ -623,10 +638,12 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) err = soc_camera_host_register(&pcdev->ici); if (err) - goto exit_free_irq; + goto exit_free_clk; return 0; +exit_free_clk: + clk_put(pcdev->clk); exit_free_irq: free_irq(pcdev->irq, pcdev); exit_release_mem: @@ -645,6 +662,7 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev) struct sh_mobile_ceu_dev *pcdev = platform_get_drvdata(pdev); soc_camera_host_unregister(&pcdev->ici); + clk_put(pcdev->clk); free_irq(pcdev->irq, pcdev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index d62fd4f6b52..ee090413e59 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2008,6 +2008,9 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) return FAILED; } + /* make sure we have no outstanding commands at this stage */ + mptscsih_flush_running_cmds(hd); + ioc = hd->ioc; printk(MYIOC_s_INFO_FMT "attempting host reset! 
(sc=%p)\n", ioc->name, SCpnt); diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 533923f83f1..73b0ca061bb 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -317,7 +317,6 @@ int gru_proc_init(void) { struct proc_entry *p; - proc_mkdir("sgi_uv", NULL); proc_gru = proc_mkdir("sgi_uv/gru", NULL); for (p = proc_files; p->name; p++) diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index ed1722e5004..7b4cbd5e03e 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -194,9 +194,10 @@ enum xp_retval { xpGruSendMqError, /* 59: gru send message queue related error */ xpBadChannelNumber, /* 60: invalid channel number */ - xpBadMsgType, /* 60: invalid message type */ + xpBadMsgType, /* 61: invalid message type */ + xpBiosError, /* 62: BIOS error */ - xpUnknownReason /* 61: unknown reason - must be last in enum */ + xpUnknownReason /* 63: unknown reason - must be last in enum */ }; /* @@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *); extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, size_t); extern int (*xp_cpu_to_nasid) (int); +extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long); +extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long); extern u64 xp_nofault_PIOR_target; extern int xp_nofault_PIOR(void *); diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 66a1d19e08a..9a2e77172d9 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy); int (*xp_cpu_to_nasid) (int cpuid); EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); +enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr, + unsigned long size); +EXPORT_SYMBOL_GPL(xp_expand_memprotect); +enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr, + unsigned long size); +EXPORT_SYMBOL_GPL(xp_restrict_memprotect); + /* * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level * users of XPC. 
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index 1440134caf3..fb3ec9d735a 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid) return cpuid_to_nasid(cpuid); } +static enum xp_retval +xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size) +{ + u64 nasid_array = 0; + int ret; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); + return xpSalError; + } + return xpSuccess; +} + +static enum xp_retval +xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size) +{ + u64 nasid_array = 0; + int ret; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); + return xpSalError; + } + return xpSuccess; +} + enum xp_retval xp_init_sn2(void) { @@ -132,6 +164,8 @@ xp_init_sn2(void) xp_pa = xp_pa_sn2; xp_remote_memcpy = xp_remote_memcpy_sn2; xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; + xp_expand_memprotect = xp_expand_memprotect_sn2; + xp_restrict_memprotect = xp_restrict_memprotect_sn2; return xp_register_nofault_code_sn2(); } diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index d9f7ce2510b..d238576b26f 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -15,6 +15,11 @@ #include <linux/device.h> #include <asm/uv/uv_hub.h> +#if defined CONFIG_X86_64 +#include <asm/uv/bios.h> +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV +#include <asm/sn/sn_sal.h> +#endif #include "../sgi-gru/grukservices.h" #include "xp.h" @@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid) return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); } +static enum xp_retval +xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW); + if (ret != BIOS_STATUS_SUCCESS) { + dev_err(xp, "uv_bios_change_memprotect(,, " + "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret); + return xpBiosError; + } + +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + u64 nasid_array; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); + return xpSalError; + } +#else + #error not a supported configuration +#endif + return xpSuccess; +} + +static enum xp_retval +xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_change_memprotect(phys_addr, size, + UV_MEMPROT_RESTRICT_ACCESS); + if (ret != BIOS_STATUS_SUCCESS) { + dev_err(xp, "uv_bios_change_memprotect(,, " + "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret); + return xpBiosError; + } + +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + u64 nasid_array; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); + return xpSalError; + } +#else + #error not a supported configuration +#endif + return xpSuccess; +} + enum xp_retval xp_init_uv(void) { BUG_ON(!is_uv()); xp_max_npartitions = 
XP_MAX_NPARTITIONS_UV; - xp_partition_id = 0; /* !!! not correct value */ - xp_region_size = 0; /* !!! not correct value */ + xp_partition_id = sn_partition_id; + xp_region_size = sn_region_size; xp_pa = xp_pa_uv; xp_remote_memcpy = xp_remote_memcpy_uv; xp_cpu_to_nasid = xp_cpu_to_nasid_uv; + xp_expand_memprotect = xp_expand_memprotect_uv; + xp_restrict_memprotect = xp_restrict_memprotect_uv; return xpSuccess; } diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 619208d6186..a5bd658c2e8 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 { xpc_nasid_mask_nlongs)) /* + * Info pertinent to a GRU message queue using a watch list for irq generation. + */ +struct xpc_gru_mq_uv { + void *address; /* address of GRU message queue */ + unsigned int order; /* size of GRU message queue as a power of 2 */ + int irq; /* irq raised when message is received in mq */ + int mmr_blade; /* blade where watchlist was allocated from */ + unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */ + int watchlist_num; /* number of watchlist allocatd by BIOS */ +}; + +/* * The activate_mq is used to send/receive GRU messages that affect XPC's * heartbeat, partition active state, and channel state. This is UV only. */ diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index b4882ccf634..73b7fb8de47 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES]; static enum xp_retval xpc_allow_amo_ops_sn2(struct amo *amos_page) { - u64 nasid_array = 0; - int ret; + enum xp_retval ret = xpSuccess; /* * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST * collides with memory operations. On those systems we call * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead. 
*/ - if (!enable_shub_wars_1_1()) { - ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE, - SN_MEMPROT_ACCESS_CLASS_1, - &nasid_array); - if (ret != 0) - return xpSalError; - } - return xpSuccess; + if (!enable_shub_wars_1_1()) + ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE); + + return ret; } /* diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 1ac694c0162..91a55b1b103 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,7 +18,15 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/err.h> #include <asm/uv/uv_hub.h> +#if defined CONFIG_X86_64 +#include <asm/uv/bios.h> +#include <asm/uv/uv_irq.h> +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV +#include <asm/sn/intr.h> +#include <asm/sn/sn_sal.h> +#endif #include "../sgi-gru/gru.h" #include "../sgi-gru/grukservices.h" #include "xpc.h" @@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv; static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) -#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) +#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_ACTIVATE_MSG_SIZE_UV) +#define XPC_ACTIVATE_IRQ_NAME "xpc_activate" -#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ - XPC_ACTIVATE_MSG_SIZE_UV) -#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ - XPC_NOTIFY_MSG_SIZE_UV) +#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) +#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_NOTIFY_MSG_SIZE_UV) +#define XPC_NOTIFY_IRQ_NAME "xpc_notify" -static void *xpc_activate_mq_uv; -static void *xpc_notify_mq_uv; +static struct xpc_gru_mq_uv *xpc_activate_mq_uv; +static struct xpc_gru_mq_uv *xpc_notify_mq_uv; static int xpc_setup_partitions_sn_uv(void) @@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void) return 0; } -static void * -xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, +static int +xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) +{ +#if defined CONFIG_X86_64 + mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); + if (mq->irq < 0) { + dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", + mq->irq); + } + +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + int mmr_pnode; + unsigned long mmr_value; + + if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) + mq->irq = SGI_XPC_ACTIVATE; + else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) + mq->irq = SGI_XPC_NOTIFY; + else + return -EINVAL; + + mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; + + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); +#else + #error not a supported configuration +#endif + + return 0; +} + +static void +xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) +{ +#if defined CONFIG_X86_64 + uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); + +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + int mmr_pnode; + unsigned long mmr_value; + + mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + mmr_value = 1UL << 16; + + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); +#else + #error not a supported configuration +#endif +} + +static int +xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, 
uv_gpa(mq->address), + mq->order, &mq->mmr_offset); + if (ret < 0) { + dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " + "ret=%d\n", ret); + return ret; + } +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), + mq->order, &mq->mmr_offset); + if (ret < 0) { + dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", + ret); + return -EBUSY; + } +#else + #error not a supported configuration +#endif + + mq->watchlist_num = ret; + return 0; +} + +static void +xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); + BUG_ON(ret != BIOS_STATUS_SUCCESS); +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); + BUG_ON(ret != SALRET_OK); +#else + #error not a supported configuration +#endif +} + +static struct xpc_gru_mq_uv * +xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, irq_handler_t irq_handler) { + enum xp_retval xp_ret; int ret; int nid; - int mq_order; + int pg_order; struct page *page; - void *mq; + struct xpc_gru_mq_uv *mq; + + mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); + if (mq == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " + "a xpc_gru_mq_uv structure\n"); + ret = -ENOMEM; + goto out_1; + } + + pg_order = get_order(mq_size); + mq->order = pg_order + PAGE_SHIFT; + mq_size = 1UL << mq->order; + + mq->mmr_blade = uv_cpu_to_blade_id(cpu); - nid = cpu_to_node(cpuid); - mq_order = get_order(mq_size); + nid = cpu_to_node(cpu); page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - mq_order); + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); - return NULL; + ret = -ENOMEM; + goto out_2; } + mq->address = page_address(page); - mq = page_address(page); - ret = gru_create_message_queue(mq, mq_size); + ret = gru_create_message_queue(mq->address, mq_size); if (ret != 0) { dev_err(xpc_part, "gru_create_message_queue() returned " "error=%d\n", ret); - free_pages((unsigned long)mq, mq_order); - return NULL; + ret = -EINVAL; + goto out_3; } - /* !!! Need to do some other things to set up IRQ */ + /* enable generation of irq when GRU mq operation occurs to this mq */ + ret = xpc_gru_mq_watchlist_alloc_uv(mq); + if (ret != 0) + goto out_3; - ret = request_irq(irq, irq_handler, 0, "xpc", NULL); + ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name); + if (ret != 0) + goto out_4; + + ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); if (ret != 0) { dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", - irq, ret); - free_pages((unsigned long)mq, mq_order); - return NULL; + mq->irq, ret); + goto out_5; } - /* !!! enable generation of irq when GRU mq op occurs to this mq */ - - /* ??? allow other partitions to access GRU mq? 
*/ + /* allow other partitions to access this GRU mq */ + xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); + if (xp_ret != xpSuccess) { + ret = -EACCES; + goto out_6; + } return mq; + + /* something went wrong */ +out_6: + free_irq(mq->irq, NULL); +out_5: + xpc_release_gru_mq_irq_uv(mq); +out_4: + xpc_gru_mq_watchlist_free_uv(mq); +out_3: + free_pages((unsigned long)mq->address, pg_order); +out_2: + kfree(mq); +out_1: + return ERR_PTR(ret); } static void -xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) +xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) { - /* ??? disallow other partitions to access GRU mq? */ + unsigned int mq_size; + int pg_order; + int ret; + + /* disallow other partitions to access GRU mq */ + mq_size = 1UL << mq->order; + ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); + BUG_ON(ret != xpSuccess); - /* !!! disable generation of irq when GRU mq op occurs to this mq */ + /* unregister irq handler and release mq irq/vector mapping */ + free_irq(mq->irq, NULL); + xpc_release_gru_mq_irq_uv(mq); - free_irq(irq, NULL); + /* disable generation of irq when GRU mq op occurs to this mq */ + xpc_gru_mq_watchlist_free_uv(mq); - free_pages((unsigned long)mq, get_order(mq_size)); + pg_order = mq->order - PAGE_SHIFT; + free_pages((unsigned long)mq->address, pg_order); + + kfree(mq); } static enum xp_retval @@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) struct xpc_partition *part; int wakeup_hb_checker = 0; - while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { + while (1) { + msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address); + if (msg_hdr == NULL) + break; partid = msg_hdr->partid; if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { @@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) } } - gru_free_message(xpc_activate_mq_uv, msg_hdr); + gru_free_message(xpc_activate_mq_uv->address, msg_hdr); } if (wakeup_hb_checker) @@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) struct xpc_partition_uv *part_uv = &part->sn.uv; /* - * !!! Make our side think that the remote parition sent an activate + * !!! Make our side think that the remote partition sent an activate * !!! message our way by doing what the activate IRQ handler would * !!! do had one really been sent. */ @@ -500,14 +660,39 @@ static enum xp_retval xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, size_t *len) { - /* !!! call the UV version of sn_partition_reserved_page_pa() */ - return xpUnsupported; + s64 status; + enum xp_retval ret; + +#if defined CONFIG_X86_64 + status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa, + (u64 *)len); + if (status == BIOS_STATUS_SUCCESS) + ret = xpSuccess; + else if (status == BIOS_STATUS_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpBiosError; + +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV + status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); + if (status == SALRET_OK) + ret = xpSuccess; + else if (status == SALRET_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpSalError; + +#else + #error not a supported configuration +#endif + + return ret; } static int xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) { - rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv); + rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address); return 0; } @@ -1411,22 +1596,18 @@ xpc_init_uv(void) return -E2BIG; } - /* ??? 
The cpuid argument's value is 0, is that what we want? */ - /* !!! The irq argument's value isn't correct. */ - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0, + xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, + XPC_ACTIVATE_IRQ_NAME, xpc_handle_activate_IRQ_uv); - if (xpc_activate_mq_uv == NULL) - return -ENOMEM; + if (IS_ERR(xpc_activate_mq_uv)) + return PTR_ERR(xpc_activate_mq_uv); - /* ??? The cpuid argument's value is 0, is that what we want? */ - /* !!! The irq argument's value isn't correct. */ - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0, + xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, + XPC_NOTIFY_IRQ_NAME, xpc_handle_notify_IRQ_uv); - if (xpc_notify_mq_uv == NULL) { - /* !!! The irq argument's value isn't correct. */ - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, - XPC_ACTIVATE_MQ_SIZE_UV, 0); - return -ENOMEM; + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + return PTR_ERR(xpc_notify_mq_uv); } return 0; @@ -1435,9 +1616,6 @@ xpc_init_uv(void) void xpc_exit_uv(void) { - /* !!! The irq argument's value isn't correct. */ - xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); - - /* !!! The irq argument's value isn't correct. */ - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); + xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index b7ad2829d67..ac57b6a42c6 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c @@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mlx4_cq_resize); int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - int collapsed) + unsigned vector, int collapsed) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; @@ -198,6 +198,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, u64 mtt_addr; int err; + if (vector >= dev->caps.num_comp_vectors) + return -EINVAL; + + cq->vector = vector; + cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); if (cq->cqn == -1) return -ENOMEM; @@ -227,7 +232,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, cq_context->flags = cpu_to_be32(!!collapsed << 18); cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); - cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn; + cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; mtt_addr = mlx4_mtt_addr(dev, mtt); @@ -276,7 +281,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); - synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq); + synchronize_irq(priv->eq_table.eq[cq->vector].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c index 23d54a0e681..91f50de84be 100644 --- a/drivers/net/mlx4/en_cq.c +++ b/drivers/net/mlx4/en_cq.c @@ -51,10 +51,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, int err; cq->size = entries; - if (mode == RX) + if (mode == RX) { cq->buf_size = cq->size * sizeof(struct mlx4_cqe); - else + cq->vector = ring % mdev->dev->caps.num_comp_vectors; + } else { cq->buf_size = sizeof(struct mlx4_cqe); + cq->vector = 0; + } cq->ring = ring; cq->is_tx = mode; @@ -87,7 +90,7 @@ int 
mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) memset(cq->buf, 0, cq->buf_size); err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, - cq->wqres.db.dma, &cq->mcq, cq->is_tx); + cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx); if (err) return err; diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index 4b9794e97a7..c1c05852a95 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c @@ -170,9 +170,9 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mlx4_info(mdev, "Using %d tx rings for port:%d\n", mdev->profile.prof[i].tx_ring_num, i); if (!mdev->profile.prof[i].rx_ring_num) { - mdev->profile.prof[i].rx_ring_num = 1; + mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors; mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", - 1, i); + mdev->profile.prof[i].rx_ring_num, i); } else mlx4_info(mdev, "Using %d rx rings for port:%d\n", mdev->profile.prof[i].rx_ring_num, i); diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index de169338cd9..2c19bff7cba 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -243,10 +243,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) * least that often. */ if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { - /* - * Conditional on hca_type is OK here because - * this is a rare case, not the fast path. - */ eq_set_ci(eq, 0); set_ci = 0; } @@ -266,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); - for (i = 0; i < MLX4_NUM_EQ; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); return IRQ_RETVAL(work); @@ -304,6 +300,17 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, MLX4_CMD_TIME_CLASS_A); } +static int mlx4_num_eq_uar(struct mlx4_dev *dev) +{ + /* + * Each UAR holds 4 EQ doorbells. To figure out how many UARs + * we need to map, take the difference of highest index and + * the lowest index we'll use and add 1. 
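 * (Worked example with hypothetical values: with reserved_eqs = 64 and
 * num_comp_vectors = 4, the expression below gives
 * (4 + 1 + 64) / 4 - 64 / 4 + 1 = 17 - 16 + 1 = 2, i.e. two UAR pages
 * need to be mapped.)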
+ */ + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - + dev->caps.reserved_eqs / 4 + 1; +} + static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -483,9 +490,11 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) if (eq_table->have_irq) free_irq(dev->pdev->irq, dev); - for (i = 0; i < MLX4_NUM_EQ; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) free_irq(eq_table->eq[i].irq, eq_table->eq + i); + + kfree(eq_table->irq_names); } static int mlx4_map_clr_int(struct mlx4_dev *dev) @@ -551,57 +560,93 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev) __free_page(priv->eq_table.icm_page); } +int mlx4_alloc_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, + sizeof *priv->eq_table.eq, GFP_KERNEL); + if (!priv->eq_table.eq) + return -ENOMEM; + + return 0; +} + +void mlx4_free_eq_table(struct mlx4_dev *dev) +{ + kfree(mlx4_priv(dev)->eq_table.eq); +} + int mlx4_init_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; int i; + priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map, + mlx4_num_eq_uar(dev), GFP_KERNEL); + if (!priv->eq_table.uar_map) { + err = -ENOMEM; + goto err_out_free; + } + err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); if (err) - return err; + goto err_out_free; - for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) priv->eq_table.uar_map[i] = NULL; err = mlx4_map_clr_int(dev); if (err) - goto err_out_free; + goto err_out_bitmap; priv->eq_table.clr_mask = swab32(1 << (priv->eq_table.inta_pin & 31)); priv->eq_table.clr_int = priv->clr_base + (priv->eq_table.inta_pin < 32 ? 4 : 0); - err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0, - &priv->eq_table.eq[MLX4_EQ_COMP]); - if (err) - goto err_out_unmap; + priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); + if (!priv->eq_table.irq_names) { + err = -ENOMEM; + goto err_out_bitmap; + } + + for (i = 0; i < dev->caps.num_comp_vectors; ++i) { + err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, + &priv->eq_table.eq[i]); + if (err) + goto err_out_unmap; + } err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0, - &priv->eq_table.eq[MLX4_EQ_ASYNC]); + (dev->flags & MLX4_FLAG_MSI_X) ? 
dev->caps.num_comp_vectors : 0, + &priv->eq_table.eq[dev->caps.num_comp_vectors]); if (err) goto err_out_comp; if (dev->flags & MLX4_FLAG_MSI_X) { - static const char *eq_name[] = { - [MLX4_EQ_COMP] = DRV_NAME " (comp)", - [MLX4_EQ_ASYNC] = DRV_NAME " (async)" - }; + static const char async_eq_name[] = "mlx4-async"; + const char *eq_name; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i < dev->caps.num_comp_vectors) { + snprintf(priv->eq_table.irq_names + i * 16, 16, + "mlx4-comp-%d", i); + eq_name = priv->eq_table.irq_names + i * 16; + } else + eq_name = async_eq_name; - for (i = 0; i < MLX4_NUM_EQ; ++i) { err = request_irq(priv->eq_table.eq[i].irq, - mlx4_msi_x_interrupt, - 0, eq_name[i], priv->eq_table.eq + i); + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + i); if (err) goto err_out_async; priv->eq_table.eq[i].have_irq = 1; } - } else { err = request_irq(dev->pdev->irq, mlx4_interrupt, IRQF_SHARED, DRV_NAME, dev); @@ -612,28 +657,36 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) } err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, - priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", - priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); - for (i = 0; i < MLX4_NUM_EQ; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) eq_set_ci(&priv->eq_table.eq[i], 1); return 0; err_out_async: - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); + mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); err_out_comp: - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]); + i = dev->caps.num_comp_vectors - 1; err_out_unmap: + while (i >= 0) { + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + --i; + } mlx4_unmap_clr_int(dev); mlx4_free_irqs(dev); -err_out_free: +err_out_bitmap: mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + +err_out_free: + kfree(priv->eq_table.uar_map); + return err; } @@ -643,18 +696,20 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) int i; mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, - priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); mlx4_free_irqs(dev); - for (i = 0; i < MLX4_NUM_EQ; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); mlx4_unmap_clr_int(dev); - for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i) + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) if (priv->eq_table.uar_map[i]) iounmap(priv->eq_table.uar_map[i]); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + + kfree(priv->eq_table.uar_map); } diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 90a0281d15e..710c79e7a2d 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -421,9 +421,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, ((u64) (MLX4_CMPT_TYPE_EQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, - roundup_pow_of_two(MLX4_NUM_EQ + - dev->caps.reserved_eqs), - MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0); + dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); if (err) goto err_cq; @@ -810,12 +808,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) if (dev->flags & MLX4_FLAG_MSI_X) { mlx4_warn(dev, "NOP command failed to generate MSI-X " "interrupt IRQ %d).\n", - priv->eq_table.eq[MLX4_EQ_ASYNC].irq); + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_warn(dev, "Trying again without MSI-X.\n"); } else { mlx4_err(dev, "NOP command failed to generate interrupt 
" "(IRQ %d), aborting.\n", - priv->eq_table.eq[MLX4_EQ_ASYNC].irq); + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@ -908,31 +906,50 @@ err_uar_table_free: static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - struct msix_entry entries[MLX4_NUM_EQ]; + struct msix_entry *entries; + int nreq; int err; int i; if (msi_x) { - for (i = 0; i < MLX4_NUM_EQ; ++i) + nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs, + num_possible_cpus() + 1); + entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + if (!entries) + goto no_msi; + + for (i = 0; i < nreq; ++i) entries[i].entry = i; - err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries)); + retry: + err = pci_enable_msix(dev->pdev, entries, nreq); if (err) { - if (err > 0) - mlx4_info(dev, "Only %d MSI-X vectors available, " - "not using MSI-X\n", err); + /* Try again if at least 2 vectors are available */ + if (err > 1) { + mlx4_info(dev, "Requested %d vectors, " + "but only %d MSI-X vectors available, " + "trying again\n", nreq, err); + nreq = err; + goto retry; + } + goto no_msi; } - for (i = 0; i < MLX4_NUM_EQ; ++i) + dev->caps.num_comp_vectors = nreq - 1; + for (i = 0; i < nreq; ++i) priv->eq_table.eq[i].irq = entries[i].vector; dev->flags |= MLX4_FLAG_MSI_X; + + kfree(entries); return; } no_msi: - for (i = 0; i < MLX4_NUM_EQ; ++i) + dev->caps.num_comp_vectors = 1; + + for (i = 0; i < 2; ++i) priv->eq_table.eq[i].irq = dev->pdev->irq; } @@ -1074,6 +1091,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_cmd; + err = mlx4_alloc_eq_table(dev); + if (err) + goto err_close; + mlx4_enable_msi_x(dev); err = mlx4_setup_hca(dev); @@ -1084,7 +1105,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } if (err) - goto err_close; + goto err_free_eq; for (port = 1; port <= dev->caps.num_ports; port++) { err = mlx4_init_port_info(dev, port); @@ -1114,6 +1135,9 @@ err_port: mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev); +err_free_eq: + mlx4_free_eq_table(dev); + err_close: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); @@ -1177,6 +1201,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); mlx4_cleanup_uar_table(dev); + mlx4_free_eq_table(dev); mlx4_close_hca(dev); mlx4_cmd_cleanup(dev); diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 34c909deaff..e0213bad61c 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -63,12 +63,6 @@ enum { }; enum { - MLX4_EQ_ASYNC, - MLX4_EQ_COMP, - MLX4_NUM_EQ -}; - -enum { MLX4_NUM_PDS = 1 << 15 }; @@ -205,10 +199,11 @@ struct mlx4_cq_table { struct mlx4_eq_table { struct mlx4_bitmap bitmap; + char *irq_names; void __iomem *clr_int; - void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4]; + void __iomem **uar_map; u32 clr_mask; - struct mlx4_eq eq[MLX4_NUM_EQ]; + struct mlx4_eq *eq; u64 icm_virt; struct page *icm_page; dma_addr_t icm_dma; @@ -328,6 +323,9 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); int mlx4_reset(struct mlx4_dev *dev); +int mlx4_alloc_eq_table(struct mlx4_dev *dev); +void mlx4_free_eq_table(struct mlx4_dev *dev); + int mlx4_init_pd_table(struct mlx4_dev *dev); int mlx4_init_uar_table(struct mlx4_dev *dev); int mlx4_init_mr_table(struct mlx4_dev *dev); diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index 9ca42b213d5..919fb9eb1b6 100644 --- 
a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c @@ -107,7 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_AUXC].num = request->num_qp; profile[MLX4_RES_SRQ].num = request->num_srq; profile[MLX4_RES_CQ].num = request->num_cq; - profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs; + profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, + dev_cap->reserved_eqs + + num_possible_cpus() + 1); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; profile[MLX4_RES_MTT].num = request->num_mtt; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fd0b11ea556..666c1d98cda 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -715,6 +715,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) struct tun_net *tn; struct tun_struct *tun; struct net_device *dev; + const struct cred *cred = current_cred(); int err; tn = net_generic(net, tun_net_id); @@ -725,11 +726,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) /* Check permissions */ if (((tun->owner != -1 && - current->euid != tun->owner) || + cred->euid != tun->owner) || (tun->group != -1 && - current->egid != tun->group)) && - !capable(CAP_NET_ADMIN)) + cred->egid != tun->group)) && + !capable(CAP_NET_ADMIN)) { return -EPERM; + } } else if (__dev_get_by_name(net, ifr->ifr_name)) return -EINVAL; diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index f9e244da30a..9bcb6cbd5aa 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -113,7 +113,7 @@ struct acpiphp_slot { u8 device; /* pci device# */ - u32 sun; /* ACPI _SUN (slot unique number) */ + unsigned long long sun; /* ACPI _SUN (slot unique number) */ u32 flags; /* see below */ }; diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 95b536a23d2..43c10bd261b 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -337,7 +337,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; acpiphp_slot->slot = slot; - snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun); + snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bridge->pci_bus, diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 955aae4071f..3affc6472e6 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -255,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) bridge->nr_slots++; - dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n", + dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", slot->sun, pci_domain_nr(bridge->pci_bus), bridge->pci_bus->number, slot->device); retval = acpiphp_register_hotplug_slot(slot); if (retval) { if (retval == -EBUSY) - warn("Slot %d already registered by another " + warn("Slot %llu already registered by another " "hotplug driver\n", slot->sun); else warn("acpiphp_register_hotplug_slot failed " diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index c892daae74d..633e743442a 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void) goto error; } - /* lock ourselves into memory with a module - * count of -1 so that no one can unload us. 
*/ - module_put(THIS_MODULE); - exit: return rc; @@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void) } module_init(ibmphp_init); -module_exit(ibmphp_exit); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4b23bc39b11..39cf248d24e 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -432,18 +432,19 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ goto err_out_release_ctlr; } + /* Check if slot is occupied */ t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); - - t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ - if (value && pciehp_force) { - rc = pciehp_enable_slot(t_slot); - if (rc) /* -ENODEV: shouldn't happen, but deal with it */ - value = 0; - } - if ((POWER_CTRL(ctrl)) && !value) { - rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ - if (rc) - goto err_out_free_ctrl_slot; + t_slot->hpc_ops->get_adapter_status(t_slot, &value); + if (value) { + if (pciehp_force) + pciehp_enable_slot(t_slot); + } else { + /* Power off slot if not occupied */ + if (POWER_CTRL(ctrl)) { + rc = t_slot->hpc_ops->power_off_slot(t_slot); + if (rc) + goto err_out_free_ctrl_slot; + } } return 0; diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index dfc63d01f20..aac7006949f 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data) if (!dev->driver || !dev->driver->err_handler || - !dev->driver->err_handler->slot_reset) + !dev->driver->err_handler->resume) return; err_handler = dev->driver->err_handler; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5f4f85f56cb..ce098561513 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -606,27 +606,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev) sis_apic_bug = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); - -#define AMD8131_revA0 0x01 -#define AMD8131_revB0 0x11 -#define AMD8131_MISC 0x40 -#define AMD8131_NIOAMODE_BIT 0 -static void quirk_amd_8131_ioapic(struct pci_dev *dev) -{ - unsigned char tmp; - - if (nr_ioapics == 0) - return; - - if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) { - dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n"); - pci_read_config_byte( dev, AMD8131_MISC, &tmp); - tmp &= ~(1 << AMD8131_NIOAMODE_BIT); - pci_write_config_byte( dev, AMD8131_MISC, tmp); - } -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); #endif /* CONFIG_X86_IO_APIC */ /* @@ -1423,6 +1402,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); +#ifdef CONFIG_X86_IO_APIC +/* + * Boot interrupts on some chipsets cannot be turned off. For these chipsets, + * remap the original interrupt in the linux kernel to the boot interrupt, so + * that a PCI device's interrupt handler is installed on the boot interrupt + * line instead. 
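 * Note: the quirk below does not reprogram any hardware. It only tags the
 * device by setting dev->irq_reroute_variant (and is skipped when the
 * noioapicquirk or noioapicreroute options are set), so that the interrupt
 * can later be remapped to its legacy equivalent.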
+ */ +static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) +{ + if (noioapicquirk || noioapicreroute) + return; + + dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; + + printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", + dev->vendor, dev->device); + return; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); + +/* + * On some chipsets we can disable the generation of legacy INTx boot + * interrupts. + */ + +/* + * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no + * 300641-004US, section 5.7.3. 
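 * (The fixup below is a read-modify-write of the 16-bit word at config
 * offset 0x40 (INTEL_6300_IOAPIC_ABAR), setting bit 14 to suppress the
 * boot interrupt.)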
+ */ +#define INTEL_6300_IOAPIC_ABAR 0x40 +#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) + +static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) +{ + u16 pci_config_word; + + if (noioapicquirk) + return; + + pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); + pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; + pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); + + printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", + dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); + +/* + * disable boot interrupts on HT-1000 + */ +#define BC_HT1000_FEATURE_REG 0x64 +#define BC_HT1000_PIC_REGS_ENABLE (1<<0) +#define BC_HT1000_MAP_IDX 0xC00 +#define BC_HT1000_MAP_DATA 0xC01 + +static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) +{ + u32 pci_config_dword; + u8 irq; + + if (noioapicquirk) + return; + + pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); + pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | + BC_HT1000_PIC_REGS_ENABLE); + + for (irq = 0x10; irq < 0x10 + 32; irq++) { + outb(irq, BC_HT1000_MAP_IDX); + outb(0x00, BC_HT1000_MAP_DATA); + } + + pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); + + printk(KERN_INFO "disabled boot interrupts on PCI device" + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); + +/* + * disable boot interrupts on AMD and ATI chipsets + */ +/* + * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 + * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode + * (due to an erratum). 
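 * (The fixup below clears the NOIOAMODE bit (bit 0) of the MISC register at
 * config offset 0x40, which both stops boot interrupt generation and, per the
 * erratum above, fixes IO-APIC mode on the affected 8131 revisions.)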
+ */ +#define AMD_813X_MISC 0x40 +#define AMD_813X_NOIOAMODE (1<<0) + +static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) +{ + u32 pci_config_dword; + + if (noioapicquirk) + return; + + pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); + pci_config_dword &= ~AMD_813X_NOIOAMODE; + pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); + + printk(KERN_INFO "disabled boot interrupts on PCI device " + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); + +#define AMD_8111_PCI_IRQ_ROUTING 0x56 + +static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) +{ + u16 pci_config_word; + + if (noioapicquirk) + return; + + pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); + if (!pci_config_word) { + printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " + "already disabled\n", + dev->vendor, dev->device); + return; + } + pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); + printk(KERN_INFO "disabled boot interrupts on PCI device " + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +#endif /* CONFIG_X86_IO_APIC */ + /* * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 2cd77ab8fc6..054e05294af 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -328,6 +328,13 @@ isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm) int sr; u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; + /* The clock has an 8 bit wide bcd-coded register (they never learn) + * for the year. tm_year is an offset from 1900 and we are interested + * in the 2000-2099 range, so any value less than 100 is invalid. + */ + if (tm->tm_year < 100) + return -EINVAL; + regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec); regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min); regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 363bd1303d2..570ae59c1d5 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1898,15 +1898,19 @@ restart_cb: wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); /* Process finished ERP request. */ if (cqr->refers) { + spin_lock_bh(&block->queue_lock); __dasd_block_process_erp(block, cqr); + spin_unlock_bh(&block->queue_lock); /* restart list_for_xx loop since dasd_process_erp * might remove multiple elements */ goto restart_cb; } /* call the callback function */ + spin_lock_irq(&block->request_queue_lock); cqr->endclk = get_clock(); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); + spin_unlock_irq(&block->request_queue_lock); } return rc; } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 921443b01d1..2ef25731d19 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -23,6 +23,7 @@ /* This is ugly... 
*/ #define PRINTK_HEADER "dasd_devmap:" +#define DASD_BUS_ID_SIZE 20 #include "dasd_int.h" @@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache); */ struct dasd_devmap { struct list_head list; - char bus_id[BUS_ID_SIZE]; + char bus_id[DASD_BUS_ID_SIZE]; unsigned int devindex; unsigned short features; struct dasd_device *device; @@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id) int hash, i; hash = 0; - for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++) + for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++) hash += *bus_id; return hash & 0xff; } @@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) { int from, from_id0, from_id1; int to, to_id0, to_id1; int features, rc; - char bus_id[BUS_ID_SIZE+1], *str; + char bus_id[DASD_BUS_ID_SIZE+1], *str; str = parsestring; rc = dasd_busid(&str, &from_id0, &from_id1, &from); @@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features) devmap = NULL; hash = dasd_hash_busid(bus_id); list_for_each_entry(tmp, &dasd_hashlists[hash], list) - if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { + if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { devmap = tmp; break; } if (!devmap) { /* This bus_id is new. */ new->devindex = dasd_max_devindex++; - strncpy(new->bus_id, bus_id, BUS_ID_SIZE); + strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE); new->features = features; new->device = NULL; list_add(&new->list, &dasd_hashlists[hash]); @@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id) devmap = ERR_PTR(-ENODEV); hash = dasd_hash_busid(bus_id); list_for_each_entry(tmp, &dasd_hashlists[hash], list) { - if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { + if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { devmap = tmp; break; } @@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev) } spin_lock_irqsave(get_ccwdev_lock(cdev), flags); - cdev->dev.driver_data = device; + dev_set_drvdata(&cdev->dev, device); spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); return device; @@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device) /* Disconnect dasd_device structure from ccw_device structure. 
*/ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - device->cdev->dev.driver_data = NULL; + dev_set_drvdata(&device->cdev->dev, NULL); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); /* @@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) struct dasd_device * dasd_device_from_cdev_locked(struct ccw_device *cdev) { - struct dasd_device *device = cdev->dev.driver_data; + struct dasd_device *device = dev_get_drvdata(&cdev->dev); if (!device) return ERR_PTR(-ENODEV); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2e60d5f968c..bd2c52e2076 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, /* service information message SIM */ - if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && + if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) && ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { dasd_3990_erp_handle_sim(device, irb->ecw); dasd_schedule_device_bh(device); diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 9088de84b45..bf6fd348f20 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off, #ifdef CONFIG_DASD_PROFILE static char * -dasd_statistics_array(char *str, unsigned int *array, int shift) +dasd_statistics_array(char *str, unsigned int *array, int factor) { int i; for (i = 0; i < 32; i++) { - str += sprintf(str, "%7d ", array[i] >> shift); + str += sprintf(str, "%7d ", array[i] / factor); if (i == 15) str += sprintf(str, "\n"); } @@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off, #ifdef CONFIG_DASD_PROFILE struct dasd_profile_info_t *prof; char *str; - int shift; + int factor; /* check for active profiling */ if (dasd_profile_level == DASD_PROFILE_OFF) { @@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off, prof = &dasd_global_profile; /* prevent couter 'overflow' on output */ - for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++); + for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; + factor *= 10); str = page; str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); - str += sprintf(str, "with %d sectors(512B each)\n", + str += sprintf(str, "with %u sectors(512B each)\n", prof->dasd_io_sects); + str += sprintf(str, "Scale Factor is %d\n", factor); str += sprintf(str, " __<4 ___8 __16 __32 __64 _128 " " _256 _512 __1k __2k __4k __8k " @@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off, " __1G __2G __4G " " _>4G\n"); str += sprintf(str, "Histogram of sizes (512B secs)\n"); - str = dasd_statistics_array(str, prof->dasd_io_secs, shift); + str = dasd_statistics_array(str, prof->dasd_io_secs, factor); str += sprintf(str, "Histogram of I/O times (microseconds)\n"); - str = dasd_statistics_array(str, prof->dasd_io_times, shift); + str = dasd_statistics_array(str, prof->dasd_io_times, factor); str += sprintf(str, "Histogram of I/O times per sector\n"); - str = dasd_statistics_array(str, prof->dasd_io_timps, shift); + str = dasd_statistics_array(str, prof->dasd_io_timps, factor); str += sprintf(str, "Histogram of I/O time till ssch\n"); - str = dasd_statistics_array(str, prof->dasd_io_time1, shift); + str = dasd_statistics_array(str, prof->dasd_io_time1, factor); str += sprintf(str, "Histogram of I/O time between ssch and 
irq\n"); - str = dasd_statistics_array(str, prof->dasd_io_time2, shift); + str = dasd_statistics_array(str, prof->dasd_io_time2, factor); str += sprintf(str, "Histogram of I/O time between ssch " "and irq per sector\n"); - str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift); + str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); str += sprintf(str, "Histogram of I/O time between irq and end\n"); - str = dasd_statistics_array(str, prof->dasd_io_time3, shift); + str = dasd_statistics_array(str, prof->dasd_io_time3, factor); str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); - str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift); + str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); len = str - page; #else len = sprintf(page, "Statistics are not activated in this kernel\n"); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 63f26a135fe..26ffc6ab441 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -4,6 +4,9 @@ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer */ +#define KMSG_COMPONENT "dcssblk" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ctype.h> @@ -17,19 +20,10 @@ #include <linux/interrupt.h> #include <asm/s390_rdev.h> -//#define DCSSBLK_DEBUG /* Debug messages on/off */ #define DCSSBLK_NAME "dcssblk" #define DCSSBLK_MINORS_PER_DISK 1 #define DCSSBLK_PARM_LEN 400 - -#ifdef DCSSBLK_DEBUG -#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x) -#else -#define PRINT_DEBUG(x...) do {} while (0) -#endif -#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x) -#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x) -#define PRINT_ERR(x...) 
printk(KERN_ERR DCSSBLK_NAME " error: " x) +#define DCSS_BUS_ID_SIZE 20 static int dcssblk_open(struct block_device *bdev, fmode_t mode); static int dcssblk_release(struct gendisk *disk, fmode_t mode); @@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = { struct dcssblk_dev_info { struct list_head lh; struct device dev; - char segment_name[BUS_ID_SIZE]; + char segment_name[DCSS_BUS_ID_SIZE]; atomic_t use_count; struct gendisk *gd; unsigned long start; @@ -65,7 +59,7 @@ struct dcssblk_dev_info { struct segment_info { struct list_head lh; - char segment_name[BUS_ID_SIZE]; + char segment_name[DCSS_BUS_ID_SIZE]; unsigned long start; unsigned long end; int segment_type; @@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) /* check continuity */ for (i = 0; i < dev_info->num_of_segments - 1; i++) { if ((sort_list[i].end + 1) != sort_list[i+1].start) { - PRINT_ERR("Segment %s is not contiguous with " - "segment %s\n", - sort_list[i].segment_name, - sort_list[i+1].segment_name); + pr_err("Adjacent DCSSs %s and %s are not " + "contiguous\n", sort_list[i].segment_name, + sort_list[i+1].segment_name); rc = -EINVAL; goto out; } @@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) !(sort_list[i+1].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i+1].segment_type == SEG_TYPE_ER)) { - PRINT_ERR("Segment %s has different type from " - "segment %s\n", - sort_list[i].segment_name, - sort_list[i+1].segment_name); + pr_err("DCSS %s and DCSS %s have " + "incompatible types\n", + sort_list[i].segment_name, + sort_list[i+1].segment_name); rc = -EINVAL; goto out; } @@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch } else if (inbuf[0] == '0') { /* reload segments in exclusive mode */ if (dev_info->segment_type == SEG_TYPE_SC) { - PRINT_ERR("Segment type SC (%s) cannot be loaded in " - "non-shared mode\n", dev_info->segment_name); + pr_err("DCSS %s is of type SC and cannot be " + "loaded as exclusive-writable\n", + dev_info->segment_name); rc = -EINVAL; goto out; } @@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch goto out; removeseg: - PRINT_ERR("Could not reload segment(s) of the device %s, removing " - "segment(s) now!\n", - dev_info->segment_name); + pr_err("DCSS device %s is removed after a failed access mode " + "change\n", dev_info->segment_name); temp = entry; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry != temp) @@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char if (inbuf[0] == '1') { if (atomic_read(&dev_info->use_count) == 0) { // device is idle => we save immediately - PRINT_INFO("Saving segment(s) of the device %s\n", - dev_info->segment_name); + pr_info("All DCSSs that map to device %s are " + "saved\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } } else { // device is busy => we save it when it becomes // idle in dcssblk_release - PRINT_INFO("Device %s is currently busy, segment(s) " - "will be saved when it becomes idle...\n", - dev_info->segment_name); + pr_info("Device %s is in use, its DCSSs will be " + "saved when it becomes idle\n", + dev_info->segment_name); dev_info->save_pending = 1; } } else if (inbuf[0] == '0') { @@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char // device is busy & the user wants to undo his save // request 
dev_info->save_pending = 0; - PRINT_INFO("Pending save for segment(s) of the device " - "%s deactivated\n", - dev_info->segment_name); + pr_info("A pending save request for device %s " + "has been canceled\n", + dev_info->segment_name); } } else { up_write(&dcssblk_devices_sem); @@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors - PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, " - "capacity = %lu (512 Byte) sectors\n", local_buf, - seg_byte_size, seg_byte_size >> 9); + pr_info("Loaded %s with total size %lu bytes and capacity %lu " + "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); dev_info->save_pending = 0; dev_info->is_shared = 1; @@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch dev_info = dcssblk_get_device_by_name(local_buf); if (dev_info == NULL) { up_write(&dcssblk_devices_sem); - PRINT_WARN("Device %s is not loaded!\n", local_buf); + pr_warning("Device %s cannot be removed because it is not a " + "known device\n", local_buf); rc = -ENODEV; goto out_buf; } if (atomic_read(&dev_info->use_count) != 0) { up_write(&dcssblk_devices_sem); - PRINT_WARN("Device %s is in use!\n", local_buf); + pr_warning("Device %s cannot be removed while it is in " + "use\n", local_buf); rc = -EBUSY; goto out_buf; } @@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) down_write(&dcssblk_devices_sem); if (atomic_dec_and_test(&dev_info->use_count) && (dev_info->save_pending)) { - PRINT_INFO("Device %s became idle and is being saved now\n", - dev_info->segment_name); + pr_info("Device %s has become idle and is being saved " + "now\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } @@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) case SEG_TYPE_SC: /* cannot write to these segments */ if (bio_data_dir(bio) == WRITE) { - PRINT_WARN("rejecting write to ro device %s\n", + pr_warning("Writing to %s failed because it " + "is a read-only device\n", dev_name(&dev_info->dev)); goto fail; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 03916989ed2..76814f3e898 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -25,6 +25,9 @@ * generic hard disk support to replace ad-hoc partitioning */ +#define KMSG_COMPONENT "xpram" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ctype.h> /* isdigit, isxdigit */ @@ -42,12 +45,6 @@ #define XPRAM_DEVS 1 /* one partition */ #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ -#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x) -#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x) -#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x) -#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) - - typedef struct { unsigned int size; /* size of xpram segment in pages */ unsigned int offset; /* start page of xpram segment */ @@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages) /* Check number of devices. 
*/ if (devs <= 0 || devs > XPRAM_MAX_DEVS) { - PRINT_ERR("invalid number %d of devices\n",devs); + pr_err("%d is not a valid number of XPRAM devices\n",devs); return -EINVAL; } xpram_devs = devs; @@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages) mem_auto_no++; } - PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); + pr_info(" number of devices (partitions): %d \n", xpram_devs); for (i = 0; i < xpram_devs; i++) { if (xpram_sizes[i]) - PRINT_INFO(" size of partition %d: %u kB\n", - i, xpram_sizes[i]); + pr_info(" size of partition %d: %u kB\n", + i, xpram_sizes[i]); else - PRINT_INFO(" size of partition %d to be set " - "automatically\n",i); + pr_info(" size of partition %d to be set " + "automatically\n",i); } - PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n", - mem_needed); - PRINT_DEBUG(" partitions to be sized automatically: %d\n", - mem_auto_no); + pr_info(" memory needed (for sized partitions): %lu kB\n", + mem_needed); + pr_info(" partitions to be sized automatically: %d\n", + mem_auto_no); if (mem_needed > pages * 4) { - PRINT_ERR("Not enough expanded memory available\n"); + pr_err("Not enough expanded memory available\n"); return -EINVAL; } @@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages) */ if (mem_auto_no) { mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; - PRINT_INFO(" automatically determined " - "partition size: %lu kB\n", mem_auto); + pr_info(" automatically determined " + "partition size: %lu kB\n", mem_auto); for (i = 0; i < xpram_devs; i++) if (xpram_sizes[i] == 0) xpram_sizes[i] = mem_auto; @@ -405,12 +402,12 @@ static int __init xpram_init(void) /* Find out size of expanded memory. */ if (xpram_present() != 0) { - PRINT_WARN("No expanded memory available\n"); + pr_err("No expanded memory available\n"); return -ENODEV; } xpram_pages = xpram_highest_page_index() + 1; - PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", - xpram_pages, (unsigned long) xpram_pages*4); + pr_info(" %u pages expanded memory found (%lu KB).\n", + xpram_pages, (unsigned long) xpram_pages*4); rc = xpram_setup_sizes(xpram_pages); if (rc) return rc; diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 35fd8dfcaaa..97e63cf4694 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -7,6 +7,9 @@ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ +#define KMSG_COMPONENT "monreader" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> @@ -24,19 +27,6 @@ #include <asm/ebcdic.h> #include <asm/extmem.h> -//#define MON_DEBUG /* Debug messages on/off */ - -#define MON_NAME "monreader" - -#define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x) -#define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x) -#define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x) - -#ifdef MON_DEBUG -#define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x) -#else -#define P_DEBUG(x...) 
do {} while (0) -#endif #define MON_COLLECT_SAMPLE 0x80 #define MON_COLLECT_EVENT 0x40 @@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg, } else monmsg->replied_msglim = 1; if (rc) { - P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc); + pr_err("Reading monitor data failed with rc=%i\n", rc); return -EIO; } return 0; @@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) { struct mon_private *monpriv = path->private; - P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); + pr_err("z/VM *MONITOR system service disconnected with rc=%i\n", + ipuser[0]); iucv_path_sever(path, NULL); atomic_set(&monpriv->iucv_severed, 1); wake_up(&mon_conn_wait_queue); @@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path, memcpy(&monpriv->msg_array[monpriv->write_index]->msg, msg, sizeof(*msg)); if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { - P_WARNING("IUCV message pending, message limit (%i) reached\n", - MON_MSGLIM); + pr_warning("The read queue for monitor data is full\n"); monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; } monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; @@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp) rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, MON_SERVICE, NULL, user_data_connect, monpriv); if (rc) { - P_ERROR("iucv connection to *MONITOR failed with " - "IPUSER SEVER code = %i\n", rc); + pr_err("Connecting to the z/VM *MONITOR system service " + "failed with rc=%i\n", rc); rc = -EIO; goto out_path; } @@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp) */ rc = iucv_path_sever(monpriv->path, user_data_sever); if (rc) - P_ERROR("close, iucv_sever failed with rc = %i\n", rc); + pr_warning("Disconnecting the z/VM *MONITOR system service " + "failed with rc=%i\n", rc); atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_connected, 0); @@ -469,7 +460,8 @@ static int __init mon_init(void) int rc; if (!MACHINE_IS_VM) { - P_ERROR("not running under z/VM, driver not loaded\n"); + pr_err("The z/VM *MONITOR record device driver cannot be " + "loaded without z/VM\n"); return -ENODEV; } @@ -478,7 +470,8 @@ static int __init mon_init(void) */ rc = iucv_register(&monreader_iucv_handler, 1); if (rc) { - P_ERROR("failed to register with iucv driver\n"); + pr_err("The z/VM *MONITOR record device driver failed to " + "register with IUCV\n"); return rc; } @@ -488,8 +481,8 @@ static int __init mon_init(void) goto out_iucv; } if (rc != SEG_TYPE_SC) { - P_ERROR("segment %s has unsupported type, should be SC\n", - mon_dcss_name); + pr_err("The specified *MONITOR DCSS %s does not have the " + "required type SC\n", mon_dcss_name); rc = -EINVAL; goto out_iucv; } diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 4d71aa8c1a7..c7d7483bab9 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -8,6 +8,9 @@ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> */ +#define KMSG_COMPONENT "monwriter" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> @@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); if (rc <= 0) return rc; + pr_err("Writing monitor data failed with rc=%i\n", rc); if (rc == 5) return -EPERM; - printk("DIAG X'DC' error with return 
code: %i\n", rc); return -EINVAL; } diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index ec9c0bcf66e..50639049641 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -6,6 +6,9 @@ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ +#define KMSG_COMPONENT "sclp_cmd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/completion.h> #include <linux/init.h> #include <linux/errno.h> @@ -16,9 +19,8 @@ #include <linux/memory.h> #include <asm/chpid.h> #include <asm/sclp.h> -#include "sclp.h" -#define TAG "sclp_cmd: " +#include "sclp.h" #define SCLP_CMDW_READ_SCP_INFO 0x00020001 #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 @@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) /* Check response. */ if (request->status != SCLP_REQ_DONE) { - printk(KERN_WARNING TAG "sync request failed " - "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status); + pr_warning("sync request failed (cmd=0x%08x, " + "status=0x%02x)\n", cmd, request->status); rc = -EIO; } out: @@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) if (rc) goto out; if (sccb->header.response_code != 0x0010) { - printk(KERN_WARNING TAG "readcpuinfo failed " - "(response=0x%04x)\n", sccb->header.response_code); + pr_warning("readcpuinfo failed (response=0x%04x)\n", + sccb->header.response_code); rc = -EIO; goto out; } @@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd) case 0x0120: break; default: - printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, " - "response=0x%04x)\n", cmd, sccb->header.response_code); + pr_warning("configure cpu failed (cmd=0x%08x, " + "response=0x%04x)\n", cmd, + sccb->header.response_code); rc = -EIO; break; } @@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd) case 0x0450: break; default: - printk(KERN_WARNING TAG "configure channel-path failed " - "(cmd=0x%08x, response=0x%04x)\n", cmd, - sccb->header.response_code); + pr_warning("configure channel-path failed " + "(cmd=0x%08x, response=0x%04x)\n", cmd, + sccb->header.response_code); rc = -EIO; break; } @@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info) if (rc) goto out; if (sccb->header.response_code != 0x0010) { - printk(KERN_WARNING TAG "read channel-path info failed " - "(response=0x%04x)\n", sccb->header.response_code); + pr_warning("read channel-path info failed " + "(response=0x%04x)\n", sccb->header.response_code); rc = -EIO; goto out; } diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 4cebd6ee6d2..b497afe061c 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -5,15 +5,17 @@ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ +#define KMSG_COMPONENT "sclp_config" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/init.h> #include <linux/errno.h> #include <linux/cpu.h> #include <linux/sysdev.h> #include <linux/workqueue.h> #include <asm/smp.h> -#include "sclp.h" -#define TAG "sclp_config: " +#include "sclp.h" struct conf_mgm_data { u8 reserved; @@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) int cpu; struct sys_device *sysdev; - printk(KERN_WARNING TAG "cpu capability changed.\n"); + pr_warning("cpu capability changed.\n"); get_online_cpus(); for_each_online_cpu(cpu) { sysdev = get_cpu_sysdev(cpu); @@ -78,7 +80,7 @@ static int __init sclp_conf_init(void) return rc; if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { - printk(KERN_WARNING TAG "no 
configuration management.\n"); + pr_warning("no configuration management.\n"); sclp_unregister(&sclp_conf_register); rc = -ENOSYS; } diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index d887bd261d2..62c2647f37f 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c @@ -7,6 +7,9 @@ * Michael Ernst <mernst@de.ibm.com> */ +#define KMSG_COMPONENT "sclp_cpi" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> @@ -20,6 +23,7 @@ #include <linux/completion.h> #include <asm/ebcdic.h> #include <asm/sclp.h> + #include "sclp.h" #include "sclp_rw.h" #include "sclp_cpi_sys.h" @@ -150,16 +154,16 @@ static int cpi_req(void) wait_for_completion(&completion); if (req->status != SCLP_REQ_DONE) { - printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n", - req->status); + pr_warning("request failed (status=0x%02x)\n", + req->status); rc = -EIO; goto out_free_req; } response = ((struct cpi_sccb *) req->sccb)->header.response_code; if (response != 0x0020) { - printk(KERN_WARNING "cpi: failed with " - "response code 0x%x\n", response); + pr_warning("request failed with response code 0x%x\n", + response); rc = -EIO; } diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 8b854857ba0..6a1c58dc61a 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -5,15 +5,18 @@ * Author(s): Michael Holzheu */ +#define KMSG_COMPONENT "sclp_sdias" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/sched.h> #include <asm/sclp.h> #include <asm/debug.h> #include <asm/ipl.h> + #include "sclp.h" #include "sclp_rw.h" #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) -#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 @@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void) rc = sdias_sclp_send(&request); if (rc) { - ERROR_MSG("sclp_send failed for get_nr_blocks\n"); + pr_err("sclp_send failed for get_nr_blocks\n"); goto out; } if (sccb.hdr.response_code != 0x0020) { @@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void) rc = sccb.evbuf.blk_cnt; break; default: - ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); + pr_err("SCLP error: %x\n", + sccb.evbuf.event_status); rc = -EIO; goto out; } @@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) rc = sdias_sclp_send(&request); if (rc) { - ERROR_MSG("sclp_send failed: %x\n", rc); + pr_err("sclp_send failed: %x\n", rc); goto out; } if (sccb.hdr.response_code != 0x0020) { @@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) case EVSTATE_NO_DATA: TRACE("no data\n"); default: - ERROR_MSG("Error from SCLP while copying hsa. " - "Event status = %x\n", - sccb.evbuf.event_status); + pr_err("Error from SCLP while copying hsa. 
" + "Event status = %x\n", + sccb.evbuf.event_status); rc = -EIO; } out: diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 9854f19f5e6..a839aa531d7 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty) return count; } -static void -__sclp_vt220_flush_buffer(void) -{ - unsigned long flags; - - sclp_vt220_emit_current(); - spin_lock_irqsave(&sclp_vt220_lock, flags); - if (timer_pending(&sclp_vt220_timer)) - del_timer(&sclp_vt220_timer); - while (sclp_vt220_outqueue_count > 0) { - spin_unlock_irqrestore(&sclp_vt220_lock, flags); - sclp_sync_wait(); - spin_lock_irqsave(&sclp_vt220_lock, flags); - } - spin_unlock_irqrestore(&sclp_vt220_lock, flags); -} - /* * Pass on all buffers to the hardware. Return only when there are no more * buffers pending. @@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index) return sclp_vt220_driver; } +static void __sclp_vt220_flush_buffer(void) +{ + unsigned long flags; + + sclp_vt220_emit_current(); + spin_lock_irqsave(&sclp_vt220_lock, flags); + if (timer_pending(&sclp_vt220_timer)) + del_timer(&sclp_vt220_timer); + while (sclp_vt220_outqueue_count > 0) { + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_vt220_lock, flags); + } + spin_unlock_irqrestore(&sclp_vt220_lock, flags); +} + static int sclp_vt220_notify(struct notifier_block *self, unsigned long event, void *data) diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 09e7d9bf438..a6087cec55b 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -11,12 +11,14 @@ * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS */ +#define KMSG_COMPONENT "vmcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/uaccess.h> @@ -26,8 +28,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); MODULE_DESCRIPTION("z/VM CP interface"); -#define PRINTK_HEADER "vmcp: " - static debug_info_t *vmcp_debug; static int vmcp_open(struct inode *inode, struct file *file) @@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file) if (!session) return -ENOMEM; - lock_kernel(); session->bufsize = PAGE_SIZE; session->response = NULL; session->resp_size = 0; mutex_init(&session->mutex); file->private_data = session; - unlock_kernel(); return nonseekable_open(inode, file); } @@ -193,7 +191,8 @@ static int __init vmcp_init(void) int ret; if (!MACHINE_IS_VM) { - PRINT_WARN("z/VM CP interface is only available under z/VM\n"); + pr_warning("The z/VM CP interface device driver cannot be " + "loaded without z/VM\n"); return -ENODEV; } diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 24762727bc2..aabbeb909cc 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -10,6 +10,10 @@ * Stefan Weinhuber <wein@de.ibm.com> * */ + +#define KMSG_COMPONENT "vmlogrdr" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> @@ -28,8 +32,6 @@ #include <linux/smp_lock.h> #include <linux/string.h> - - MODULE_AUTHOR ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" " Stefan Weinhuber (wein@de.ibm.com)"); @@ 
-174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) struct vmlogrdr_priv_t * logptr = path->private; u8 reason = (u8) ipuser[8]; - printk (KERN_ERR "vmlogrdr: connection severed with" - " reason %i\n", reason); + pr_err("vmlogrdr: connection severed with reason %i\n", reason); iucv_path_sever(path, NULL); kfree(path); @@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,1,logptr->autopurge); if (ret) - printk (KERN_WARNING "vmlogrdr: failed to start " - "recording automatically\n"); + pr_warning("vmlogrdr: failed to start " + "recording automatically\n"); } /* create connection to the system service */ @@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) logptr->system_service, NULL, NULL, logptr); if (connect_rc) { - printk (KERN_ERR "vmlogrdr: iucv connection to %s " - "failed with rc %i \n", logptr->system_service, - connect_rc); + pr_err("vmlogrdr: iucv connection to %s " + "failed with rc %i \n", + logptr->system_service, connect_rc); goto out_path; } @@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,0,logptr->autopurge); if (ret) - printk (KERN_WARNING "vmlogrdr: failed to stop " - "recording automatically\n"); + pr_warning("vmlogrdr: failed to stop " + "recording automatically\n"); } logptr->dev_in_use = 0; @@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void) dev_t dev; if (! MACHINE_IS_VM) { - printk (KERN_ERR "vmlogrdr: not running under VM, " - "driver not loaded.\n"); + pr_err("not running under VM, driver not loaded.\n"); return -ENODEV; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 9020eba620e..5dcef81fc9d 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -8,6 +8,9 @@ * Frank Munzert <munzert@de.ibm.com> */ +#define KMSG_COMPONENT "vmur" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/cdev.h> #include <linux/smp_lock.h> @@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); MODULE_LICENSE("GPL"); -#define PRINTK_HEADER "vmur: " - static dev_t ur_first_dev_maj_min; static struct class *vmur_class; static struct debug_info *vmur_dbf; @@ -987,7 +988,8 @@ static int __init ur_init(void) dev_t dev; if (!MACHINE_IS_VM) { - PRINT_ERR("%s is only available under z/VM.\n", ur_banner); + pr_err("The %s cannot be loaded without z/VM\n", + ur_banner); return -ENODEV; } @@ -1006,7 +1008,8 @@ static int __init ur_init(void) rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); if (rc) { - PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); + pr_err("Kernel function alloc_chrdev_region failed with " + "error code %d\n", rc); goto fail_unregister_driver; } ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); @@ -1016,7 +1019,7 @@ static int __init ur_init(void) rc = PTR_ERR(vmur_class); goto fail_unregister_region; } - PRINT_INFO("%s loaded.\n", ur_banner); + pr_info("%s loaded.\n", ur_banner); return 0; fail_unregister_region: @@ -1034,7 +1037,7 @@ static void __exit ur_exit(void) unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); ccw_driver_unregister(&ur_driver); debug_unregister(vmur_dbf); - PRINT_INFO("%s unloaded.\n", ur_banner); + pr_info("%s unloaded.\n", ur_banner); } module_init(ur_init); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 
7fd84be1193..eefc6611412 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -9,6 +9,9 @@ * Author(s): Michael Holzheu */ +#define KMSG_COMPONENT "zdump" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/utsname.h> @@ -24,8 +27,6 @@ #include "sclp.h" #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) -#define MSG(x...) printk( KERN_ALERT x ) -#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) #define TO_USER 0 #define TO_KERNEL 1 @@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch) switch (arch) { case ARCH_S390X: - MSG("DETECTED 'S390X (64 bit) OS'\n"); + pr_alert("DETECTED 'S390X (64 bit) OS'\n"); sys_info.sa_base = SAVE_AREA_BASE_S390X; sys_info.sa_size = sizeof(struct save_area_s390x); set_s390x_lc_mask(&sys_info.lc_mask); break; case ARCH_S390: - MSG("DETECTED 'S390 (32 bit) OS'\n"); + pr_alert("DETECTED 'S390 (32 bit) OS'\n"); sys_info.sa_base = SAVE_AREA_BASE_S390; sys_info.sa_size = sizeof(struct save_area_s390); set_s390_lc_mask(&sys_info.lc_mask); break; default: - ERROR_MSG("unknown architecture 0x%x.\n",arch); + pr_alert("0x%x is an unknown architecture.\n",arch); return -EINVAL; } sys_info.arch = arch; @@ -674,7 +675,8 @@ static int __init zcore_init(void) #ifndef __s390x__ if (arch == ARCH_S390X) { - ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); + pr_alert("The 32-bit dump tool cannot be used for a " + "64-bit system\n"); rc = -EINVAL; goto fail; } diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2f547b840ef..fe00be3675c 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -9,6 +9,9 @@ * Arnd Bergmann (arndb@de.ibm.com) */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/slab.h> @@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid, { if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { if (msgtrigger) - printk(KERN_WARNING "cio: Invalid cio_ignore range " - "0.%x.%04x-0.%x.%04x\n", from_ssid, from, - to_ssid, to); + pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " + "range for cio_ignore\n", from_ssid, from, + to_ssid, to); + return 1; } @@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, rc = 0; out: if (rc && msgtrigger) - printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", - str); + pr_warning("%s is not a valid device for the cio_ignore " + "kernel parameter\n", str); return rc; } diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 3ac2c2019f5..918e6fce257 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -19,6 +19,8 @@ #include <asm/ccwdev.h> #include <asm/ccwgroup.h> +#define CCW_BUS_ID_SIZE 20 + /* In Linux 2.4, we had a channel device layer called "chandev" * that did all sorts of obscure stuff for networking devices. * This is another driver that serves as a replacement for just @@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const gdev = to_ccwgroupdev(dev); - if (gdev->state != CCWGROUP_OFFLINE) - return -EINVAL; - + /* Prevent concurrent online/offline processing and ungrouping. 
*/ + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state != CCWGROUP_OFFLINE) { + rc = -EINVAL; + goto out; + } /* Note that we cannot unregister the device from one of its * attribute methods, so we have to use this roundabout approach. */ rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); - if (rc) - count = rc; +out: + if (rc) { + /* Release onoff "lock" when ungrouping failed. */ + atomic_set(&gdev->onoff, 0); + return rc; + } return count; } @@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) len = end - start + 1; end++; } - if (len < BUS_ID_SIZE) { + if (len < CCW_BUS_ID_SIZE) { strlcpy(bus_id, start, len); rc = 0; } else @@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) return rc; } -static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE]) +static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) { int cssid, ssid, devno; @@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, { struct ccwgroup_device *gdev; int rc, i; - char tmp_bus_id[BUS_ID_SIZE]; + char tmp_bus_id[CCW_BUS_ID_SIZE]; const char *curr_buf; gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 29826fdd47b..ebab6ea4659 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -8,6 +8,9 @@ * Arnd Bergmann (arndb@de.ibm.com) */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> @@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) struct chp_config_data *data; struct chp_id chpid; int num; + char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); if (sei_area->rs != 0) @@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) if (!chp_test_bit(data->map, num)) continue; chpid.id = num; - printk(KERN_WARNING "cio: processing configure event %d for " - "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); + pr_notice("Processing %s for channel path %x.%02x\n", + events[data->op], chpid.cssid, chpid.id); switch (data->op) { case 0: chp_cfg_schedule(chpid, 1); diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index f49f0e502b8..0a2f2edafc0 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) } private->request = NULL; memcpy(&request->irb, irb, sizeof(*irb)); - stsch(sch->schid, &sch->schib); + cio_update_schib(sch); complete(&request->completion); put_device(&sch->dev); } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3db2c386546..8a8df755296 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -9,6 +9,9 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch) return flags; } -/* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. 
- */ -static int -cio_tpi(void) -{ - struct tpi_info *tpi_info; - struct subchannel *sch; - struct irb *irb; - int irq_context; - - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; - if (tpi (NULL) != 1) - return 0; - irb = (struct irb *) __LC_IRB; - /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->schid, irb) != 0) - /* Not status pending or not operational. */ - return 1; - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (!sch) - return 1; - irq_context = in_interrupt(); - if (!irq_context) - local_bh_disable(); - irq_enter (); - spin_lock(sch->lock); - memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); - if (sch->driver && sch->driver->irq) - sch->driver->irq(sch); - spin_unlock(sch->lock); - irq_exit (); - if (!irq_context) - _local_bh_enable(); - return 1; -} - static int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) { @@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) else sch->lpm = 0; - stsch (sch->schid, &sch->schib); - CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " "subchannel 0.%x.%04x!\n", sch->schid.ssid, sch->schid.sch_no); + + if (cio_update_schib(sch)) + return -ENODEV; + sprintf(dbf_text, "no%s", dev_name(&sch->dev)); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch) switch (ccode) { case 0: /* success */ /* Update information in scsw. */ - stsch (sch->schid, &sch->schib); + if (cio_update_schib(sch)) + return -ENODEV; return 0; case 1: /* status pending */ return -EBUSY; @@ -365,30 +333,70 @@ cio_cancel (struct subchannel *sch) } } + +static void cio_apply_config(struct subchannel *sch, struct schib *schib) +{ + schib->pmcw.intparm = sch->config.intparm; + schib->pmcw.mbi = sch->config.mbi; + schib->pmcw.isc = sch->config.isc; + schib->pmcw.ena = sch->config.ena; + schib->pmcw.mme = sch->config.mme; + schib->pmcw.mp = sch->config.mp; + schib->pmcw.csense = sch->config.csense; + schib->pmcw.mbfc = sch->config.mbfc; + if (sch->config.mbfc) + schib->mba = sch->config.mba; +} + +static int cio_check_config(struct subchannel *sch, struct schib *schib) +{ + return (schib->pmcw.intparm == sch->config.intparm) && + (schib->pmcw.mbi == sch->config.mbi) && + (schib->pmcw.isc == sch->config.isc) && + (schib->pmcw.ena == sch->config.ena) && + (schib->pmcw.mme == sch->config.mme) && + (schib->pmcw.mp == sch->config.mp) && + (schib->pmcw.csense == sch->config.csense) && + (schib->pmcw.mbfc == sch->config.mbfc) && + (!sch->config.mbfc || (schib->mba == sch->config.mba)); +} + /* - * Function: cio_modify - * Issues a "Modify Subchannel" on the specified subchannel + * cio_commit_config - apply configuration to the subchannel */ -int -cio_modify (struct subchannel *sch) +int cio_commit_config(struct subchannel *sch) { - int ccode, retry, ret; + struct schib schib; + int ccode, retry, ret = 0; + + if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + return -ENODEV; - ret = 0; for (retry = 0; retry < 5; retry++) { - ccode = msch_err (sch->schid, &sch->schib); - if (ccode < 0) /* -EIO if msch gets a program check. */ + /* copy desired changes to local schib */ + cio_apply_config(sch, &schib); + ccode = msch_err(sch->schid, &schib); + if (ccode < 0) /* -EIO if msch gets a program check. 
*/ return ccode; switch (ccode) { case 0: /* successfull */ - return 0; - case 1: /* status pending */ + if (stsch(sch->schid, &schib) || + !css_sch_is_valid(&schib)) + return -ENODEV; + if (cio_check_config(sch, &schib)) { + /* commit changes from local schib */ + memcpy(&sch->schib, &schib, sizeof(schib)); + return 0; + } + ret = -EAGAIN; + break; + case 1: /* status pending */ return -EBUSY; - case 2: /* busy */ - udelay (100); /* allow for recovery */ + case 2: /* busy */ + udelay(100); /* allow for recovery */ ret = -EBUSY; break; - case 3: /* not operational */ + case 3: /* not operational */ return -ENODEV; } } @@ -396,6 +404,23 @@ cio_modify (struct subchannel *sch) } /** + * cio_update_schib - Perform stsch and update schib if subchannel is valid. + * @sch: subchannel on which to perform stsch + * Return zero on success, -ENODEV otherwise. + */ +int cio_update_schib(struct subchannel *sch) +{ + struct schib schib; + + if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + return -ENODEV; + + memcpy(&sch->schib, &schib, sizeof(schib)); + return 0; +} +EXPORT_SYMBOL_GPL(cio_update_schib); + +/** * cio_enable_subchannel - enable a subchannel. * @sch: subchannel to be enabled * @intparm: interruption parameter to set @@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch) int cio_enable_subchannel(struct subchannel *sch, u32 intparm) { char dbf_txt[15]; - int ccode; int retry; int ret; @@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) if (sch_is_pseudo_sch(sch)) return -EINVAL; - ccode = stsch (sch->schid, &sch->schib); - if (ccode) + if (cio_update_schib(sch)) return -ENODEV; - for (retry = 5, ret = 0; retry > 0; retry--) { - sch->schib.pmcw.ena = 1; - sch->schib.pmcw.isc = sch->isc; - sch->schib.pmcw.intparm = intparm; - ret = cio_modify(sch); - if (ret == -ENODEV) - break; - if (ret == -EIO) + sch->config.ena = 1; + sch->config.isc = sch->isc; + sch->config.intparm = intparm; + + for (retry = 0; retry < 3; retry++) { + ret = cio_commit_config(sch); + if (ret == -EIO) { /* - * Got a program check in cio_modify. Try without + * Got a program check in msch. Try without * the concurrent sense bit the next time. */ - sch->schib.pmcw.csense = 0; - if (ret == 0) { - stsch (sch->schid, &sch->schib); - if (sch->schib.pmcw.ena) - break; - } - if (ret == -EBUSY) { + sch->config.csense = 0; + } else if (ret == -EBUSY) { struct irb irb; if (tsch(sch->schid, &irb) != 0) break; - } + } else + break; } sprintf (dbf_txt, "ret:%d", ret); CIO_TRACE_EVENT (2, dbf_txt); @@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); int cio_disable_subchannel(struct subchannel *sch) { char dbf_txt[15]; - int ccode; - int retry; int ret; CIO_TRACE_EVENT (2, "dissch"); @@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch) if (sch_is_pseudo_sch(sch)) return 0; - ccode = stsch (sch->schid, &sch->schib); - if (ccode == 3) /* Not operational. */ + if (cio_update_schib(sch)) return -ENODEV; if (scsw_actl(&sch->schib.scsw) != 0) @@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch) */ return -EBUSY; - for (retry = 5, ret = 0; retry > 0; retry--) { - sch->schib.pmcw.ena = 0; - ret = cio_modify(sch); - if (ret == -ENODEV) - break; - if (ret == -EBUSY) - /* - * The subchannel is busy or status pending. - * We'll disable when the next interrupt was delivered - * via the state machine. 
- */ - break; - if (ret == 0) { - stsch (sch->schid, &sch->schib); - if (!sch->schib.pmcw.ena) - break; - } - } + sch->config.ena = 0; + ret = cio_commit_config(sch); + sprintf (dbf_txt, "ret:%d", ret); CIO_TRACE_EVENT (2, dbf_txt); return ret; @@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx"; static struct io_subchannel_private console_priv; static int console_subchannel_in_use; +/* + * Use tpi to get a pending interrupt, call the interrupt handler and + * return a pointer to the subchannel structure. + */ +static int cio_tpi(void) +{ + struct tpi_info *tpi_info; + struct subchannel *sch; + struct irb *irb; + int irq_context; + + tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; + if (tpi(NULL) != 1) + return 0; + irb = (struct irb *) __LC_IRB; + /* Store interrupt response block to lowcore. */ + if (tsch(tpi_info->schid, irb) != 0) + /* Not status pending or not operational. */ + return 1; + sch = (struct subchannel *)(unsigned long)tpi_info->intparm; + if (!sch) + return 1; + irq_context = in_interrupt(); + if (!irq_context) + local_bh_disable(); + irq_enter(); + spin_lock(sch->lock); + memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); + if (sch->driver && sch->driver->irq) + sch->driver->irq(sch); + spin_unlock(sch->lock); + irq_exit(); + if (!irq_context) + _local_bh_enable(); + return 1; +} + void *cio_get_console_priv(void) { return &console_priv; @@ -780,7 +817,7 @@ cio_probe_console(void) sch_no = cio_get_console_sch_no(); if (sch_no == -1) { console_subchannel_in_use = 0; - printk(KERN_WARNING "cio: No ccw console found!\n"); + pr_warning("No CCW console was found\n"); return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); @@ -796,10 +833,9 @@ cio_probe_console(void) * enable console I/O-interrupt subclass */ isc_register(CONSOLE_ISC); - console_subchannel.schib.pmcw.isc = CONSOLE_ISC; - console_subchannel.schib.pmcw.intparm = - (u32)(addr_t)&console_subchannel; - ret = cio_modify(&console_subchannel); + console_subchannel.config.isc = CONSOLE_ISC; + console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; + ret = cio_commit_config(&console_subchannel); if (ret) { isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; @@ -811,8 +847,8 @@ cio_probe_console(void) void cio_release_console(void) { - console_subchannel.schib.pmcw.intparm = 0; - cio_modify(&console_subchannel); + console_subchannel.config.intparm = 0; + cio_commit_config(&console_subchannel); isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; } @@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) cc = msch(schid, schib); if (cc) return (cc==3?-ENODEV:-EBUSY); - stsch(schid, schib); + if (stsch(schid, schib) || !css_sch_is_valid(schib)) + return -ENODEV; if (!schib->pmcw.ena) return 0; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0fb24784e92..5150fba742a 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -45,6 +45,19 @@ struct pmcw { /* ... in an operand exception. */ } __attribute__ ((packed)); +/* Target SCHIB configuration. 
*/ +struct schib_config { + u64 mba; + u32 intparm; + u16 mbi; + u32 isc:3; + u32 ena:1; + u32 mme:2; + u32 mp:1; + u32 csense:1; + u32 mbfc:1; +} __attribute__ ((packed)); + /* * subchannel information block */ @@ -82,6 +95,8 @@ struct subchannel { struct device dev; /* entry in device tree */ struct css_driver *driver; void *private; /* private per subchannel type data */ + struct work_struct work; + struct schib_config config; } __attribute__ ((aligned(8))); #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ @@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); extern int cio_cancel (struct subchannel *); extern int cio_set_options (struct subchannel *, int); extern int cio_get_options (struct subchannel *); -extern int cio_modify (struct subchannel *); +extern int cio_update_schib(struct subchannel *sch); +extern int cio_commit_config(struct subchannel *sch); int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); int cio_tm_intrg(struct subchannel *sch); diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index a90b28c0be5..dc98b2c6386 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -25,6 +25,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/bootmem.h> #include <linux/device.h> #include <linux/init.h> @@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff) static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) { - int ret; - int retry; struct subchannel *sch; - struct schib *schib; sch = to_subchannel(cdev->dev.parent); - schib = &sch->schib; - /* msch can silently fail, so do it again if necessary */ - for (retry = 0; retry < 3; retry++) { - /* prepare schib */ - stsch(sch->schid, schib); - schib->pmcw.mme = mme; - schib->pmcw.mbfc = mbfc; - /* address can be either a block address or a block index */ - if (mbfc) - schib->mba = address; - else - schib->pmcw.mbi = address; - - /* try to submit it */ - switch(ret = msch_err(sch->schid, schib)) { - case 0: - break; - case 1: - case 2: /* in I/O or status pending */ - ret = -EBUSY; - break; - case 3: /* subchannel is no longer valid */ - ret = -ENODEV; - break; - default: /* msch caught an exception */ - ret = -EINVAL; - break; - } - stsch(sch->schid, schib); /* restore the schib */ - - if (ret) - break; - /* check if it worked */ - if (schib->pmcw.mme == mme && - schib->pmcw.mbfc == mbfc && - (mbfc ? 
(schib->mba == address) - : (schib->pmcw.mbi == address))) - return 0; + sch->config.mme = mme; + sch->config.mbfc = mbfc; + /* address can be either a block address or a block index */ + if (mbfc) + sch->config.mba = address; + else + sch->config.mbi = address; - ret = -EINVAL; - } - - return ret; + return cio_commit_config(sch); } struct set_schib_struct { @@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); - if (stsch(sch->schid, &sch->schib)) + if (cio_update_schib(sch)) return -ENODEV; if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { @@ -1359,9 +1325,8 @@ static int __init init_cmf(void) default: return 1; } - - printk(KERN_INFO "cio: Channel measurement facility using %s " - "format (%s)\n", format_string, detect_string); + pr_info("Channel measurement facility initialized using format " + "%s (mode %s)\n", format_string, detect_string); return 0; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 76bbb1e74c2..8019288bc6d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -6,6 +6,10 @@ * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ + +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> @@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch) { if (sch) { /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); + sch->config.intparm = 0; + cio_commit_config(sch); kfree(sch->lock); kfree(sch); } @@ -844,8 +848,8 @@ out: s390_unregister_crw_handler(CRW_RSC_CSS); chsc_free_sei_area(); kfree(slow_subchannel_set); - printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", - ret); + pr_alert("The CSS device driver initialization failed with " + "errno=%d\n", ret); return ret; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4e4008325e2..23d5752349b 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev) dev_fsm_event(cdev, DEV_EVENT_NOTOPER); } spin_unlock_irq(cdev->ccwlock); + /* Give up reference from ccw_device_set_online(). */ + put_device(&cdev->dev); return ret; } spin_unlock_irq(cdev->ccwlock); - if (ret == 0) + if (ret == 0) { wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - else { + /* Give up reference from ccw_device_set_online(). */ + put_device(&cdev->dev); + } else { CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); cdev->online = 1; } - return ret; + return ret; } /** @@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev) return -ENODEV; if (cdev->online || !cdev->drv) return -EINVAL; + /* Hold on to an extra reference while device is online. */ + if (!get_device(&cdev->dev)) + return -ENODEV; spin_lock_irq(cdev->ccwlock); ret = ccw_device_online(cdev); @@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev) "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); return ret; } - if (cdev->private->state != DEV_STATE_ONLINE) + if (cdev->private->state != DEV_STATE_ONLINE) { + /* Give up online reference since onlining failed. 
*/ + put_device(&cdev->dev); return -ENODEV; + } if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { cdev->online = 1; return 0; @@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev) "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); return (ret == 0) ? -ENODEV : ret; } @@ -704,6 +718,8 @@ ccw_device_release(struct device *dev) struct ccw_device *cdev; cdev = to_ccwdev(dev); + /* Release reference of parent subchannel. */ + put_device(cdev->dev.parent); kfree(cdev->private); kfree(cdev); } @@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, /* Do first half of device_register. */ device_initialize(&cdev->dev); if (!get_device(&sch->dev)) { - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); + /* Release reference from device_initialize(). */ + put_device(&cdev->dev); return -ENODEV; } return 0; @@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch, struct subchannel *other_sch; int ret; - other_sch = to_subchannel(get_device(cdev->dev.parent)); + /* Get reference for new parent. */ + if (!get_device(&sch->dev)) + return; + other_sch = to_subchannel(cdev->dev.parent); + /* Note: device_move() changes cdev->dev.parent */ ret = device_move(&cdev->dev, &sch->dev); if (ret) { CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " "(ret=%d)!\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); - put_device(&other_sch->dev); + /* Put reference for new parent. */ + put_device(&sch->dev); return; } sch_set_cdev(other_sch, NULL); /* No need to keep a subchannel without ccw device around. */ css_sch_device_unregister(other_sch); - put_device(&other_sch->dev); sch_attach_device(sch, cdev); + /* Put reference for old parent. */ + put_device(&other_sch->dev); } static void sch_attach_orphaned_device(struct subchannel *sch, struct ccw_device *cdev) { int ret; + struct subchannel *pseudo_sch; - /* Try to move the ccw device to its new subchannel. */ + /* Get reference for new parent. */ + if (!get_device(&sch->dev)) + return; + pseudo_sch = to_subchannel(cdev->dev.parent); + /* + * Try to move the ccw device to its new subchannel. + * Note: device_move() changes cdev->dev.parent + */ ret = device_move(&cdev->dev, &sch->dev); if (ret) { CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " "failed (ret=%d)!\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); + /* Put reference for new parent. */ + put_device(&sch->dev); return; } sch_attach_device(sch, cdev); + /* Put reference on pseudo subchannel. */ + put_device(&pseudo_sch->dev); } static void sch_create_and_recog_new_device(struct subchannel *sch) @@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) spin_lock_irq(sch->lock); sch_set_cdev(sch, NULL); spin_unlock_irq(sch->lock); - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); css_sch_device_unregister(sch); + /* Put reference from io_subchannel_create_ccwdev(). */ + put_device(&sch->dev); + /* Give up initial reference. */ + put_device(&cdev->dev); } } @@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work) dev_id.devno = sch->schib.pmcw.dev; dev_id.ssid = sch->schid.ssid; + /* Increase refcount for pseudo subchannel. 
*/ + get_device(&css->pseudo_subchannel->dev); /* * Move the orphaned ccw device to the orphanage so the replacing * ccw device can take its place on the subchannel. + * Note: device_move() changes cdev->dev.parent */ ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); if (ret) { CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " "(ret=%d)!\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); + /* Decrease refcount for pseudo subchannel again. */ + put_device(&css->pseudo_subchannel->dev); return; } cdev->ccwlock = css->pseudo_subchannel->lock; @@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work) if (replacing_cdev) { sch_attach_disconnected_device(sch, replacing_cdev); /* Release reference from get_disc_ccwdev_by_dev_id() */ - put_device(&cdev->dev); + put_device(&replacing_cdev->dev); + /* Release reference of subchannel from old cdev. */ + put_device(&sch->dev); return; } replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); if (replacing_cdev) { sch_attach_orphaned_device(sch, replacing_cdev); /* Release reference from get_orphaned_ccwdev_by_dev_id() */ - put_device(&cdev->dev); + put_device(&replacing_cdev->dev); + /* Release reference of subchannel from old cdev. */ + put_device(&sch->dev); return; } sch_create_and_recog_new_device(sch); + /* Release reference of subchannel from old cdev. */ + put_device(&sch->dev); } /* @@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work) priv = container_of(work, struct ccw_device_private, kick_work); cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); + /* + * Check if subchannel is still registered. It may have become + * unregistered if a machine check hit us after finishing + * device recognition but before the register work could be + * queued. + */ + if (!device_is_registered(&sch->dev)) + goto out_err; css_update_ssd_info(sch); /* * io_subchannel_register() will also be called after device @@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work) * be registered). We need to reprobe since we may now have sense id * information. */ - if (klist_node_attached(&cdev->dev.knode_parent)) { + if (device_is_registered(&cdev->dev)) { if (!cdev->drv) { ret = device_reprobe(&cdev->dev); if (ret) @@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work) CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); - put_device(&cdev->dev); spin_lock_irqsave(sch->lock, flags); sch_set_cdev(sch, NULL); spin_unlock_irqrestore(sch->lock, flags); - kfree (cdev->private); - kfree (cdev); - put_device(&sch->dev); - if (atomic_dec_and_test(&ccw_device_init_count)) - wake_up(&ccw_device_init_wq); - return; + /* Release initial device reference. */ + put_device(&cdev->dev); + goto out_err; } - put_device(&cdev->dev); out: cdev->private->flags.recog_done = 1; - put_device(&sch->dev); wake_up(&cdev->private->wait_q); +out_err: + /* Release reference for workqueue processing. */ + put_device(&cdev->dev); if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); } @@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) sch = to_subchannel(cdev->dev.parent); css_sch_device_unregister(sch); /* Reset intparm to zeroes. 
*/ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); + sch->config.intparm = 0; + cio_commit_config(sch); /* Release cdev reference for workqueue processing.*/ put_device(&cdev->dev); /* Release subchannel reference for local processing. */ @@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) PREPARE_WORK(&cdev->private->kick_work, ccw_device_call_sch_unregister); queue_work(slow_path_wq, &cdev->private->kick_work); - /* Release subchannel reference for asynchronous recognition. */ - put_device(&sch->dev); if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); break; @@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work) priv = container_of(work, struct ccw_device_private, kick_work); sch = priv->sch; cdev = priv->cdev; - former_parent = ccw_device_is_orphan(cdev) ? - NULL : to_subchannel(get_device(cdev->dev.parent)); + former_parent = to_subchannel(cdev->dev.parent); + /* Get reference for new parent. */ + if (!get_device(&sch->dev)) + return; mutex_lock(&sch->reg_mutex); - /* Try to move the ccw device to its new subchannel. */ + /* + * Try to move the ccw device to its new subchannel. + * Note: device_move() changes cdev->dev.parent + */ rc = device_move(&cdev->dev, &sch->dev); mutex_unlock(&sch->reg_mutex); if (rc) { @@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work) cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, rc); css_sch_device_unregister(sch); + /* Put reference for new parent again. */ + put_device(&sch->dev); goto out; } - if (former_parent) { + if (!sch_is_pseudo_sch(former_parent)) { spin_lock_irq(former_parent->lock); sch_set_cdev(former_parent, NULL); spin_unlock_irq(former_parent->lock); css_sch_device_unregister(former_parent); /* Reset intparm to zeroes. */ - former_parent->schib.pmcw.intparm = 0; - cio_modify(former_parent); + former_parent->config.intparm = 0; + cio_commit_config(former_parent); } sch_attach_device(sch, cdev); out: - if (former_parent) - put_device(&former_parent->dev); + /* Put reference for old parent. */ + put_device(&former_parent->dev); put_device(&cdev->dev); } @@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch) dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); } +void io_subchannel_init_config(struct subchannel *sch) +{ + memset(&sch->config, 0, sizeof(sch->config)); + sch->config.csense = 1; + /* Use subchannel mp mode when there is more than 1 installed CHPID. */ + if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0) + sch->config.mp = 1; +} + static void io_subchannel_init_fields(struct subchannel *sch) { if (cio_is_console(sch->schid)) @@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch) sch->schib.pmcw.dev, sch->schid.ssid, sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); - /* Initially set up some fields in the pmcw. */ - sch->schib.pmcw.ena = 0; - sch->schib.pmcw.csense = 1; /* concurrent sense */ - if ((sch->lpm & (sch->lpm - 1)) != 0) - sch->schib.pmcw.mp = 1; /* multipath mode */ - /* clean up possible residual cmf stuff */ - sch->schib.pmcw.mme = 0; - sch->schib.pmcw.mbfc = 0; - sch->schib.pmcw.mbi = 0; - sch->schib.mba = 0; + + io_subchannel_init_config(sch); } +static void io_subchannel_do_unreg(struct work_struct *work) +{ + struct subchannel *sch; + + sch = container_of(work, struct subchannel, work); + css_sch_device_unregister(sch); + /* Reset intparm to zeroes. 
*/ + sch->config.intparm = 0; + cio_commit_config(sch); + put_device(&sch->dev); +} + +/* Schedule unregister if we have no cdev. */ +static void io_subchannel_schedule_removal(struct subchannel *sch) +{ + get_device(&sch->dev); + INIT_WORK(&sch->work, io_subchannel_do_unreg); + queue_work(slow_path_wq, &sch->work); +} + +/* + * Note: We always return 0 so that we bind to the device even on error. + * This is needed so that our remove function is called on unregister. + */ static int io_subchannel_probe(struct subchannel *sch) { struct ccw_device *cdev; @@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch) ccw_device_register(cdev); /* * Check if the device is already online. If it is - * the reference count needs to be corrected - * (see ccw_device_online and css_init_done for the - * ugly details). + * the reference count needs to be corrected since we + * didn't obtain a reference in ccw_device_set_online. */ if (cdev->private->state != DEV_STATE_NOT_OPER && cdev->private->state != DEV_STATE_OFFLINE && @@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch) return 0; } io_subchannel_init_fields(sch); - /* - * First check if a fitting device may be found amongst the - * disconnected devices or in the orphanage. - */ - dev_id.devno = sch->schib.pmcw.dev; - dev_id.ssid = sch->schid.ssid; + rc = cio_commit_config(sch); + if (rc) + goto out_schedule; rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group); if (rc) - return rc; + goto out_schedule; /* Allocate I/O subchannel private data. */ sch->private = kzalloc(sizeof(struct io_subchannel_private), GFP_KERNEL | GFP_DMA); - if (!sch->private) { - rc = -ENOMEM; + if (!sch->private) goto out_err; - } + /* + * First check if a fitting device may be found amongst the + * disconnected devices or in the orphanage. 
+ */ + dev_id.devno = sch->schib.pmcw.dev; + dev_id.ssid = sch->schid.ssid; cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); if (!cdev) cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), @@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch) return 0; } cdev = io_subchannel_create_ccwdev(sch); - if (IS_ERR(cdev)) { - rc = PTR_ERR(cdev); + if (IS_ERR(cdev)) goto out_err; - } rc = io_subchannel_recog(cdev, sch); if (rc) { spin_lock_irqsave(sch->lock, flags); - sch_set_cdev(sch, NULL); + io_subchannel_recog_done(cdev); spin_unlock_irqrestore(sch->lock, flags); - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); - goto out_err; } return 0; out_err: kfree(sch->private); sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); - return rc; +out_schedule: + io_subchannel_schedule_removal(sch); + return 0; } static int @@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch) static int check_for_io_on_path(struct subchannel *sch, int mask) { - int cc; - - cc = stsch(sch->schid, &sch->schib); - if (cc) + if (cio_update_schib(sch)) return 0; if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) return 1; @@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch, io_subchannel_verify(sch); break; case CHP_OFFLINE: - if (stsch(sch->schid, &sch->schib)) - return -ENXIO; - if (!css_sch_is_valid(&sch->schib)) + if (cio_update_schib(sch)) return -ENODEV; io_subchannel_terminate_path(sch, mask); break; case CHP_ONLINE: - if (stsch(sch->schid, &sch->schib)) - return -ENXIO; + if (cio_update_schib(sch)) + return -ENODEV; sch->lpm |= mask & sch->opm; io_subchannel_verify(sch); break; @@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) spin_lock_irqsave(sch->lock, flags); /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); + sch->config.intparm = 0; + cio_commit_config(sch); break; case REPROBE: ccw_device_trigger_reprobe(cdev); @@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev, sch->private = cio_get_console_priv(); memset(sch->private, 0, sizeof(struct io_subchannel_private)); io_subchannel_init_fields(sch); + rc = cio_commit_config(sch); + if (rc) + return rc; sch->driver = &io_subchannel_driver; /* Initialize the ccw_device structure. */ cdev->dev.parent= &sch->dev; @@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id) bus_id = id; - return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0); + return (strcmp(bus_id, dev_name(dev)) == 0); } @@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev) "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); + /* Give up reference obtained in ccw_device_set_online(). 
*/ + put_device(&cdev->dev); } ccw_device_set_timeout(cdev, 0); cdev->drv = NULL; diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 104ed669db4..0f2e63ea48d 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq; extern atomic_t ccw_device_init_count; void io_subchannel_recog_done(struct ccw_device *cdev); +void io_subchannel_init_config(struct subchannel *sch); int ccw_device_cancel_halt_clear(struct ccw_device *); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 10bc03940fb..8df5eaafc5a 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) int ret; sch = to_subchannel(cdev->dev.parent); - ret = stsch(sch->schid, &sch->schib); - if (ret || !sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) return -ENODEV; if (!sch->schib.pmcw.ena) /* Not operational -> done. */ @@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) * through ssch() and the path information is up to date. */ old_lpm = sch->lpm; - stsch(sch->schid, &sch->schib); - sch->lpm = sch->schib.pmcw.pam & sch->opm; + /* Check since device may again have become not operational. */ - if (!sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) state = DEV_STATE_NOT_OPER; + else + sch->lpm = sch->schib.pmcw.pam & sch->opm; + if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) /* Force reprobe on all chpids. */ old_lpm = 0; @@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state) ccw_device_oper_notify(cdev); } wake_up(&cdev->private->wait_q); - - if (css_init_done && state != DEV_STATE_ONLINE) - put_device (&cdev->dev); } static int cmp_pgid(struct pgid *p1, struct pgid *p2) @@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) sch = to_subchannel(cdev->dev.parent); /* Update schib - pom may have changed. */ - stsch(sch->schid, &sch->schib); + if (cio_update_schib(sch)) { + cdev->private->flags.donotify = 0; + ccw_device_done(cdev, DEV_STATE_NOT_OPER); + return; + } /* Update lpm with verified path mask. */ sch->lpm = sch->vpm; /* Repeat path verification? */ @@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev) (cdev->private->state != DEV_STATE_BOXED)) return -EINVAL; sch = to_subchannel(cdev->dev.parent); - if (css_init_done && !get_device(&cdev->dev)) - return -ENODEV; ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); if (ret != 0) { /* Couldn't enable the subchannel for i/o. Sick device. */ @@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev) return 0; } sch = to_subchannel(cdev->dev.parent); - if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) return -ENODEV; if (scsw_actl(&sch->schib.scsw) != 0) return -EBUSY; @@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) * Since we might not just be coming from an interrupt from the * subchannel we have to update the schib. */ - stsch(sch->schid, &sch->schib); + if (cio_update_schib(sch)) { + ccw_device_verify_done(cdev, -ENODEV); + return; + } if (scsw_actl(&sch->schib.scsw) != 0 || (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || @@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); /* Update some values. 
*/ - if (stsch(sch->schid, &sch->schib)) - return; - if (!sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) return; /* * The pim, pam, pom values may not be accurate, but they are the best * we have before performing device selection :/ */ sch->lpm = sch->schib.pmcw.pam & sch->opm; - /* Re-set some bits in the pmcw that were lost. */ - sch->schib.pmcw.csense = 1; - sch->schib.pmcw.ena = 0; - if ((sch->lpm & (sch->lpm - 1)) != 0) - sch->schib.pmcw.mp = 1; + /* + * Use the initial configuration since we can't be shure that the old + * paths are valid. + */ + io_subchannel_init_config(sch); + if (cio_commit_config(sch)) + return; + /* We should also udate ssd info, but this has to wait. */ /* Check if this is another device which appeared on the same sch. */ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 86bc94eb607..fc5ca1dd52b 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev) sch->vpm = 0; /* Get current pam. */ - if (stsch(sch->schid, &sch->schib)) { + if (cio_update_schib(sch)) { ccw_device_verify_done(cdev, -ENODEV); return; } diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 1b03c5423be..5814dbee241 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -56,7 +56,8 @@ ccw_device_path_notoper(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - stsch (sch->schid, &sch->schib); + if (cio_update_schib(sch)) + goto doverify; CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " "not operational \n", __func__, @@ -64,6 +65,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; +doverify: cdev->private->flags.doverify = 1; } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index e3ea1d5f281..42f2b09631b 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -10,10 +10,10 @@ #include <asm/page.h> #include <asm/schid.h> +#include <asm/debug.h> #include "chsc.h" #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ -#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ /* @@ -111,12 +111,12 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, } static inline int do_eqbs(u64 token, unsigned char *state, int queue, - int *start, int *count) + int *start, int *count, int ack) { register unsigned long _ccq asm ("0") = *count; register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; - unsigned long _state = 0; + unsigned long _state = (unsigned long)ack << 63; asm volatile( " .insn rrf,0xB99c0000,%1,%2,0,0" @@ -133,7 +133,7 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue, static inline int do_sqbs(u64 token, unsigned char state, int queue, int *start, int *count) { return 0; } static inline int do_eqbs(u64 token, unsigned char *state, int queue, - int *start, int *count) { return 0; } + int *start, int *count, int ack) { return 0; } #endif /* CONFIG_64BIT */ struct qdio_irq; @@ -186,20 +186,14 @@ struct qdio_input_q { /* input buffer acknowledgement flag */ int polling; + /* how much sbals are acknowledged with qebsm */ + int ack_count; + /* last time of noticing incoming data */ u64 timestamp; - - /* lock for clearing the acknowledgement */ - 
spinlock_t lock; }; struct qdio_output_q { - /* failed siga-w attempts*/ - atomic_t busy_siga_counter; - - /* start time of busy condition */ - u64 timestamp; - /* PCIs are enabled for the queue */ int pci_out_enabled; @@ -250,6 +244,7 @@ struct qdio_q { struct qdio_irq *irq_ptr; struct tasklet_struct tasklet; + spinlock_t lock; /* error condition during a data transfer */ unsigned int qdio_error; @@ -300,11 +295,13 @@ struct qdio_irq { struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; + debug_info_t *debug_area; struct mutex setup_mutex; }; /* helper functions */ #define queue_type(q) q->irq_ptr->qib.qfmt +#define SCH_NO(q) (q->irq_ptr->schid.sch_no) #define is_thinint_irq(irq) \ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ @@ -348,10 +345,13 @@ static inline unsigned long long get_usecs(void) ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) #define add_buf(bufnr, inc) \ ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) +#define sub_buf(bufnr, dec) \ + ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) /* prototypes for thin interrupt */ void qdio_sync_after_thinint(struct qdio_q *q); -int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); +int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, + int auto_ack); void qdio_check_outbound_after_thinint(struct qdio_q *q); int qdio_inbound_q_moved(struct qdio_q *q); void qdio_kick_inbound_handler(struct qdio_q *q); @@ -378,10 +378,15 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs); void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); +int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, + struct subchannel_id *schid, + struct qdio_ssqd_desc *data); int qdio_setup_irq(struct qdio_initialize *init_data); void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, struct ccw_device *cdev); void qdio_release_memory(struct qdio_irq *irq_ptr); +int qdio_setup_create_sysfs(struct ccw_device *cdev); +void qdio_setup_destroy_sysfs(struct ccw_device *cdev); int qdio_setup_init(void); void qdio_setup_exit(void); diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f05590355be..f8a3b6967f6 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -14,7 +14,7 @@ #include "qdio.h" debug_info_t *qdio_dbf_setup; -debug_info_t *qdio_dbf_trace; +debug_info_t *qdio_dbf_error; static struct dentry *debugfs_root; #define MAX_DEBUGFS_QUEUES 32 @@ -22,59 +22,33 @@ static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; static DEFINE_MUTEX(debugfs_mutex); #define QDIO_DEBUGFS_NAME_LEN 40 -void qdio_allocate_do_dbf(struct qdio_initialize *init_data) +void qdio_allocate_dbf(struct qdio_initialize *init_data, + struct qdio_irq *irq_ptr) { - char dbf_text[20]; - - sprintf(dbf_text, "qfmt:%x", init_data->q_format); - QDIO_DBF_TEXT0(0, setup, dbf_text); - QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); - sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); - QDIO_DBF_TEXT0(0, setup, dbf_text); - QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); - QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); - QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); - sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); - QDIO_DBF_TEXT0(0, setup, dbf_text); - sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); - QDIO_DBF_TEXT0(0, setup, dbf_text); - QDIO_DBF_HEX0(0, setup, 
&init_data->input_handler, sizeof(void *)); - QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); - QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); - QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); - QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); - QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); -} - -static void qdio_unregister_dbf_views(void) -{ - if (qdio_dbf_setup) - debug_unregister(qdio_dbf_setup); - if (qdio_dbf_trace) - debug_unregister(qdio_dbf_trace); -} - -static int qdio_register_dbf_views(void) -{ - qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, - QDIO_DBF_SETUP_NR_AREAS, - QDIO_DBF_SETUP_LEN); - if (!qdio_dbf_setup) - goto oom; - debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); - debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); - - qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, - QDIO_DBF_TRACE_NR_AREAS, - QDIO_DBF_TRACE_LEN); - if (!qdio_dbf_trace) - goto oom; - debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); - debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); - return 0; -oom: - qdio_unregister_dbf_views(); - return -ENOMEM; + char text[20]; + + DBF_EVENT("qfmt:%1d", init_data->q_format); + DBF_HEX(init_data->adapter_name, 8); + DBF_EVENT("qpff%4x", init_data->qib_param_field_format); + DBF_HEX(&init_data->qib_param_field, sizeof(void *)); + DBF_HEX(&init_data->input_slib_elements, sizeof(void *)); + DBF_HEX(&init_data->output_slib_elements, sizeof(void *)); + DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs, + init_data->no_output_qs); + DBF_HEX(&init_data->input_handler, sizeof(void *)); + DBF_HEX(&init_data->output_handler, sizeof(void *)); + DBF_HEX(&init_data->int_parm, sizeof(long)); + DBF_HEX(&init_data->flags, sizeof(long)); + DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); + DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); + DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); + + /* allocate trace view for the interface */ + snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); + irq_ptr->debug_area = debug_register(text, 2, 1, 16); + debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); + debug_set_level(irq_ptr->debug_area, DBF_WARN); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); } static int qstat_show(struct seq_file *m, void *v) @@ -86,16 +60,18 @@ static int qstat_show(struct seq_file *m, void *v) if (!q) return 0; - seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); + seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); seq_printf(m, "ftc: %d\n", q->first_to_check); seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); seq_printf(m, "polling: %d\n", q->u.in.polling); + seq_printf(m, "ack count: %d\n", q->u.in.ack_count); seq_printf(m, "slsb buffer states:\n"); + seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); qdio_siga_sync_q(q); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { - get_buf_state(q, i, &state); + get_buf_state(q, i, &state, 0); switch (state) { case SLSB_P_INPUT_NOT_INIT: case SLSB_P_OUTPUT_NOT_INIT: @@ -127,6 +103,7 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "\n"); } seq_printf(m, "\n"); + seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); return 0; } @@ -223,11 +200,24 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd int __init qdio_debug_init(void) { 
debugfs_root = debugfs_create_dir("qdio_queues", NULL); - return qdio_register_dbf_views(); + + qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); + debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_setup, DBF_INFO); + DBF_EVENT("dbf created\n"); + + qdio_dbf_error = debug_register("qdio_error", 4, 1, 16); + debug_register_view(qdio_dbf_error, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_error, DBF_INFO); + DBF_ERROR("dbf created\n"); + return 0; } void qdio_debug_exit(void) { debugfs_remove(debugfs_root); - qdio_unregister_dbf_views(); + if (qdio_dbf_setup) + debug_unregister(qdio_dbf_setup); + if (qdio_dbf_error) + debug_unregister(qdio_dbf_error); } diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 5a4d85b829a..5d70bd162ae 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h @@ -12,80 +12,72 @@ #include <asm/qdio.h> #include "qdio.h" -#define QDIO_DBF_HEX(ex, name, level, addr, len) \ +/* that gives us 15 characters in the text event views */ +#define QDIO_DBF_LEN 16 + +extern debug_info_t *qdio_dbf_setup; +extern debug_info_t *qdio_dbf_error; + +/* sort out low debug levels early to avoid wasted sprints */ +static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) +{ + return (level <= dbf_grp->level); +} + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 6 /* informational */ + +#undef DBF_EVENT +#undef DBF_ERROR +#undef DBF_DEV_EVENT + +#define DBF_EVENT(text...) \ do { \ - if (ex) \ - debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ - else \ - debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ + char debug_buffer[QDIO_DBF_LEN]; \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ } while (0) -#define QDIO_DBF_TEXT(ex, name, level, text) \ + +#define DBF_HEX(addr, len) \ do { \ - if (ex) \ - debug_text_exception(qdio_dbf_##name, level, text); \ - else \ - debug_text_event(qdio_dbf_##name, level, text); \ + debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ } while (0) -#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) -#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) -#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) -#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) -#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) -#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) -#else -#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) -#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) -#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) -#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) -#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) -#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) -#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) -#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) -#define QDIO_DBF_TEXT6(ex, name, text) 
QDIO_DBF_TEXT(ex, name, 6, text) -#else -#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) -#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) -#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) -#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ +#define DBF_ERROR(text...) \ + do { \ + char debug_buffer[QDIO_DBF_LEN]; \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ + } while (0) -/* s390dbf views */ -#define QDIO_DBF_SETUP_LEN 8 -#define QDIO_DBF_SETUP_PAGES 8 -#define QDIO_DBF_SETUP_NR_AREAS 1 +#define DBF_ERROR_HEX(addr, len) \ + do { \ + debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ + } while (0) -#define QDIO_DBF_TRACE_LEN 8 -#define QDIO_DBF_TRACE_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TRACE_PAGES 32 -#define QDIO_DBF_SETUP_LEVEL 6 -#define QDIO_DBF_TRACE_LEVEL 4 -#else /* !CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TRACE_PAGES 8 -#define QDIO_DBF_SETUP_LEVEL 2 -#define QDIO_DBF_TRACE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ +#define DBF_DEV_EVENT(level, device, text...) \ + do { \ + char debug_buffer[QDIO_DBF_LEN]; \ + if (qdio_dbf_passes(device->debug_area, level)) { \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(device->debug_area, level, debug_buffer); \ + } \ + } while (0) -extern debug_info_t *qdio_dbf_setup; -extern debug_info_t *qdio_dbf_trace; +#define DBF_DEV_HEX(level, device, addr, len) \ + do { \ + debug_event(device->debug_area, level, (void*)(addr), len); \ + } while (0) -void qdio_allocate_do_dbf(struct qdio_initialize *init_data); -void debug_print_bstat(struct qdio_q *q); +void qdio_allocate_dbf(struct qdio_initialize *init_data, + struct qdio_irq *irq_ptr); void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev); void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev); int qdio_debug_init(void); void qdio_debug_exit(void); + #endif diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 7c865915199..744f928a59e 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -74,7 +74,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) * Note: For IQDC unicast queues only the highest priority queue is processed. */ static inline int do_siga_output(unsigned long schid, unsigned long mask, - u32 *bb, unsigned int fc) + unsigned int *bb, unsigned int fc) { register unsigned long __fc asm("0") = fc; register unsigned long __schid asm("1") = schid; @@ -95,8 +95,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) { - char dbf_text[15]; - /* all done or next buffer state different */ if (ccq == 0 || ccq == 32) return 0; @@ -104,8 +102,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) if (ccq == 96 || ccq == 97) return 1; /* notify devices immediately */ - sprintf(dbf_text, "%d", ccq); - QDIO_DBF_TEXT2(1, trace, dbf_text); + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); return -EIO; } @@ -115,41 +112,45 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) * @state: state of the extracted buffers * @start: buffer number to start at * @count: count of buffers to examine + * @auto_ack: automatically acknowledge buffers * * Returns the number of successfull extracted equal buffer states. * Stops processing if a state is different from the last buffers state. 
*/ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, - int start, int count) + int start, int count, int auto_ack) { unsigned int ccq = 0; int tmp_count = count, tmp_start = start; int nr = q->nr; int rc; - char dbf_text[15]; BUG_ON(!q->irq_ptr->sch_token); + qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); if (!q->is_input_q) nr += q->irq_ptr->nr_input_qs; again: - ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, + auto_ack); rc = qdio_check_ccq(q, ccq); /* At least one buffer was processed, return and extract the remaining * buffers later. */ - if ((ccq == 96) && (count != tmp_count)) + if ((ccq == 96) && (count != tmp_count)) { + qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); return (count - tmp_count); + } + if (rc == 1) { - QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); goto again; } if (rc < 0) { - QDIO_DBF_TEXT2(1, trace, "eqberr"); - sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); - QDIO_DBF_TEXT2(1, trace, dbf_text); + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 0, -1, -1, q->irq_ptr->int_parm); @@ -176,9 +177,12 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, int tmp_count = count, tmp_start = start; int nr = q->nr; int rc; - char dbf_text[15]; + + if (!count) + return 0; BUG_ON(!q->irq_ptr->sch_token); + qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); if (!q->is_input_q) nr += q->irq_ptr->nr_input_qs; @@ -186,16 +190,13 @@ again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); rc = qdio_check_ccq(q, ccq); if (rc == 1) { - QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); + qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); goto again; } if (rc < 0) { - QDIO_DBF_TEXT3(1, trace, "sqberr"); - sprintf(dbf_text, "%2x,%2x", count, tmp_count); - QDIO_DBF_TEXT3(1, trace, dbf_text); - sprintf(dbf_text, "%d,%d", ccq, nr); - QDIO_DBF_TEXT3(1, trace, dbf_text); - + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 0, -1, -1, q->irq_ptr->int_parm); @@ -207,7 +208,8 @@ again: /* returns number of examined buffers and their common state in *state */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, - unsigned char *state, unsigned int count) + unsigned char *state, unsigned int count, + int auto_ack) { unsigned char __state = 0; int i; @@ -216,7 +218,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); if (is_qebsm(q)) - return qdio_do_eqbs(q, state, bufnr, count); + return qdio_do_eqbs(q, state, bufnr, count, auto_ack); for (i = 0; i < count; i++) { if (!__state) @@ -230,9 +232,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, } inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, - unsigned char *state) + unsigned char *state, int auto_ack) { - return get_buf_states(q, bufnr, state, 1); + return get_buf_states(q, bufnr, state, 1, auto_ack); } /* wrap-around safe setting of slsb states, returns number of changed buffers */ @@ -282,14 +284,12 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, if (!need_siga_sync(q)) return 0; + DBF_DEV_EVENT(DBF_INFO, 
q->irq_ptr, "siga-s:%1d", q->nr); qdio_perf_stat_inc(&perf_stats.siga_sync); cc = do_siga_sync(q->irq_ptr->schid, output, input); - if (cc) { - QDIO_DBF_TEXT4(0, trace, "sigasync"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); - QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); - } + if (cc) + DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); return cc; } @@ -311,50 +311,37 @@ static inline int qdio_siga_sync_all(struct qdio_q *q) return qdio_siga_sync(q, ~0U, ~0U); } -static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) { - unsigned int fc = 0; unsigned long schid; + unsigned int fc = 0; + u64 start_time = 0; + int cc; - if (q->u.out.use_enh_siga) { + if (q->u.out.use_enh_siga) fc = 3; - } - if (!is_qebsm(q)) - schid = *((u32 *)&q->irq_ptr->schid); - else { + + if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; fc |= 0x80; } - return do_siga_output(schid, q->mask, busy_bit, fc); -} - -static int qdio_siga_output(struct qdio_q *q) -{ - int cc; - u32 busy_bit; - u64 start_time = 0; - char dbf_text[15]; - - QDIO_DBF_TEXT5(0, trace, "sigaout"); - QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); + else + schid = *((u32 *)&q->irq_ptr->schid); - qdio_perf_stat_inc(&perf_stats.siga_out); again: - cc = qdio_do_siga_output(q, &busy_bit); - if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { - sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); - QDIO_DBF_TEXT3(0, trace, dbf_text); + cc = do_siga_output(schid, q->mask, busy_bit, fc); - if (!start_time) + /* hipersocket busy condition */ + if (*busy_bit) { + WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); + + if (!start_time) { start_time = get_usecs(); - else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) + goto again; + } + if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) goto again; } - - if (cc == 2 && busy_bit) - cc |= QDIO_ERROR_SIGA_BUSY; - if (cc) - QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); return cc; } @@ -362,14 +349,12 @@ static inline int qdio_siga_input(struct qdio_q *q) { int cc; - QDIO_DBF_TEXT4(0, trace, "sigain"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); - + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); qdio_perf_stat_inc(&perf_stats.siga_in); cc = do_siga_input(q->irq_ptr->schid, q->mask); if (cc) - QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); return cc; } @@ -387,35 +372,91 @@ void qdio_sync_after_thinint(struct qdio_q *q) inline void qdio_stop_polling(struct qdio_q *q) { - spin_lock_bh(&q->u.in.lock); - if (!q->u.in.polling) { - spin_unlock_bh(&q->u.in.lock); + if (!q->u.in.polling) return; - } + q->u.in.polling = 0; qdio_perf_stat_inc(&perf_stats.debug_stop_polling); /* show the card that we are not polling anymore */ - set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); - spin_unlock_bh(&q->u.in.lock); + if (is_qebsm(q)) { + set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 0; + } else + set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); } -static void announce_buffer_error(struct qdio_q *q) +static void announce_buffer_error(struct qdio_q *q, int count) { - char dbf_text[15]; + q->qdio_error |= QDIO_ERROR_SLSB_STATE; + + /* special handling for no target buffer empty */ + if ((!q->is_input_q && + (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { + qdio_perf_stat_inc(&perf_stats.outbound_target_full); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL 
FTC:%3d", + q->first_to_check); + return; + } - if (q->is_input_q) - QDIO_DBF_TEXT3(1, trace, "inperr"); - else - QDIO_DBF_TEXT3(0, trace, "outperr"); + DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); + DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); + DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); + DBF_ERROR("F14:%2x F15:%2x", + q->sbal[q->first_to_check]->element[14].flags & 0xff, + q->sbal[q->first_to_check]->element[15].flags & 0xff); +} + +static inline void inbound_primed(struct qdio_q *q, int count) +{ + int new; + + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count); + + /* for QEBSM the ACK was already set by EQBS */ + if (is_qebsm(q)) { + if (!q->u.in.polling) { + q->u.in.polling = 1; + q->u.in.ack_count = count; + q->last_move_ftc = q->first_to_check; + return; + } + + /* delete the previous ACK's */ + set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = count; + q->last_move_ftc = q->first_to_check; + return; + } + + /* + * ACK the newest buffer. The ACK will be removed in qdio_stop_polling + * or by the next inbound run. + */ + new = add_buf(q->first_to_check, count - 1); + if (q->u.in.polling) { + /* reset the previous ACK but first set the new one */ + set_buf_state(q, new, SLSB_P_INPUT_ACK); + set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); + } + else { + q->u.in.polling = 1; + set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); + } - sprintf(dbf_text, "%x-%x-%x", q->first_to_check, - q->sbal[q->first_to_check]->element[14].flags, - q->sbal[q->first_to_check]->element[15].flags); - QDIO_DBF_TEXT3(1, trace, dbf_text); - QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); + q->last_move_ftc = new; + count--; + if (!count) + return; - q->qdio_error = QDIO_ERROR_SLSB_STATE; + /* + * Need to change all PRIMED buffers to NOT_INIT, otherwise + * we're loosing initiative in the thinint code. + */ + set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, + count); } static int get_inbound_buffer_frontier(struct qdio_q *q) @@ -424,13 +465,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) unsigned char state; /* - * If we still poll don't update last_move_ftc, keep the - * previously ACK buffer there. - */ - if (!q->u.in.polling) - q->last_move_ftc = q->first_to_check; - - /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved * would return 0. */ @@ -450,34 +484,13 @@ check_next: if (q->first_to_check == stop) goto out; - count = get_buf_states(q, q->first_to_check, &state, count); + count = get_buf_states(q, q->first_to_check, &state, count, 1); if (!count) goto out; switch (state) { case SLSB_P_INPUT_PRIMED: - QDIO_DBF_TEXT5(0, trace, "inptprim"); - - /* - * Only ACK the first buffer. The ACK will be removed in - * qdio_stop_polling. - */ - if (q->u.in.polling) - state = SLSB_P_INPUT_NOT_INIT; - else { - q->u.in.polling = 1; - state = SLSB_P_INPUT_ACK; - } - set_buf_state(q, q->first_to_check, state); - - /* - * Need to change all PRIMED buffers to NOT_INIT, otherwise - * we're loosing initiative in the thinint code. - */ - if (count > 1) - set_buf_states(q, next_buf(q->first_to_check), - SLSB_P_INPUT_NOT_INIT, count - 1); - + inbound_primed(q, count); /* * No siga-sync needed for non-qebsm here, as the inbound queue * will be synced on the next siga-r, resp. 
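/*
 * Illustrative sketch (added by the editor, not part of the patch): the
 * wrap-around buffer arithmetic behind inbound_primed() and the QEBSM
 * partial-overwrite case in handle_inbound(), as a tiny stand-alone program.
 * It assumes QDIO_MAX_BUFFERS_PER_Q is 128 and QDIO_MAX_BUFFERS_MASK is
 * QDIO_MAX_BUFFERS_PER_Q - 1, which is what the add_buf()/sub_buf()
 * definitions earlier in this patch imply; unsigned arithmetic keeps the
 * modulo behaviour well defined.
 */
#include <assert.h>

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
#define add_buf(bufnr, inc)	(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec)	(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)

int main(void)
{
	/* inbound_primed(), non-QEBSM path: ACK the newest of `count`
	 * PRIMED buffers starting at first_to_check, wrapping past 127 */
	unsigned int first_to_check = 125, count = 5;
	unsigned int newest = add_buf(first_to_check, count - 1);

	assert(newest == 1);	/* buffers 125,126,127,0,1 -> ACK buffer 1 */

	/* handle_inbound(), QEBSM partial overwrite: how far the returned
	 * buffers (bufnr..bufnr+count-1) reach past the old ACK position */
	unsigned int bufnr = 124, last_move_ftc = 126, ack_count = 4;
	unsigned int diff = sub_buf(add_buf(bufnr, count), last_move_ftc);

	assert(diff == 3);	/* ACKs at 126, 127 and 0 were handed back */
	ack_count -= diff;	/* one ACK (buffer 1) remains outstanding */
	last_move_ftc = add_buf(last_move_ftc, diff);
	assert(ack_count == 1 && last_move_ftc == 1);
	return 0;
}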
@@ -487,7 +500,7 @@ check_next: atomic_sub(count, &q->nr_buf_used); goto check_next; case SLSB_P_INPUT_ERROR: - announce_buffer_error(q); + announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); @@ -495,13 +508,12 @@ check_next: case SLSB_CU_INPUT_EMPTY: case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: - QDIO_DBF_TEXT5(0, trace, "inpnipro"); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); break; default: BUG(); } out: - QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); return q->first_to_check; } @@ -515,8 +527,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) if (!need_siga_sync(q) && !pci_out_supported(q)) q->u.in.timestamp = get_usecs(); - QDIO_DBF_TEXT4(0, trace, "inhasmvd"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); return 1; } else return 0; @@ -524,10 +535,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) static int qdio_inbound_q_done(struct qdio_q *q) { - unsigned char state; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif + unsigned char state = 0; if (!atomic_read(&q->nr_buf_used)) return 1; @@ -538,7 +546,7 @@ static int qdio_inbound_q_done(struct qdio_q *q) */ qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state); + get_buf_state(q, q->first_to_check, &state, 0); if (state == SLSB_P_INPUT_PRIMED) /* we got something to do */ return 0; @@ -552,20 +560,12 @@ static int qdio_inbound_q_done(struct qdio_q *q) * has (probably) not moved (see qdio_inbound_processing). */ if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "inqisdon"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); - sprintf(dbf_text, "pf%02x", q->first_to_check); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", + q->first_to_check); return 1; } else { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "inqisntd"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); - sprintf(dbf_text, "pf%02x", q->first_to_check); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", + q->first_to_check); return 0; } } @@ -573,9 +573,6 @@ static int qdio_inbound_q_done(struct qdio_q *q) void qdio_kick_inbound_handler(struct qdio_q *q) { int count, start, end; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif qdio_perf_stat_inc(&perf_stats.inbound_handler); @@ -586,10 +583,7 @@ void qdio_kick_inbound_handler(struct qdio_q *q) else count = end + QDIO_MAX_BUFFERS_PER_Q - start; -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "s=%2xc=%2x", start, count); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; @@ -655,14 +649,14 @@ check_next: if (q->first_to_check == stop) return q->first_to_check; - count = get_buf_states(q, q->first_to_check, &state, count); + count = get_buf_states(q, q->first_to_check, &state, count, 0); if (!count) return q->first_to_check; switch (state) { case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ - QDIO_DBF_TEXT5(0, trace, "outpempt"); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count); atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); @@ -674,14 +668,14 @@ check_next: break; goto 
check_next; case SLSB_P_OUTPUT_ERROR: - announce_buffer_error(q); + announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); break; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ - QDIO_DBF_TEXT5(0, trace, "outpprim"); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: @@ -706,99 +700,48 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) if ((bufnr != q->last_move_ftc) || q->qdio_error) { q->last_move_ftc = bufnr; - QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); return 1; } else return 0; } -/* - * VM could present us cc=2 and busy bit set on SIGA-write - * during reconfiguration of their Guest LAN (only in iqdio mode, - * otherwise qdio is asynchronous and cc=2 and busy bit there will take - * the queues down immediately). - * - * Therefore qdio_siga_output will try for a short time constantly, - * if such a condition occurs. If it doesn't change, it will - * increase the busy_siga_counter and save the timestamp, and - * schedule the queue for later processing. qdio_outbound_processing - * will check out the counter. If non-zero, it will call qdio_kick_outbound_q - * as often as the value of the counter. This will attempt further SIGA - * instructions. For each successful SIGA, the counter is - * decreased, for failing SIGAs the counter remains the same, after - * all. After some time of no movement, qdio_kick_outbound_q will - * finally fail and reflect corresponding error codes to call - * the upper layer module and have it take the queues down. - * - * Note that this is a change from the original HiperSockets design - * (saying cc=2 and busy bit means take the queues down), but in - * these days Guest LAN didn't exist... excessive cc=2 with busy bit - * conditions will still take the queues down, but the threshold is - * higher due to the Guest LAN environment. - * - * Called from outbound tasklet and do_QDIO handler. 
- */ static void qdio_kick_outbound_q(struct qdio_q *q) { - int rc; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0, trace, "kickoutq"); - QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); -#endif /* CONFIG_QDIO_DEBUG */ + unsigned int busy_bit; + int cc; if (!need_siga_out(q)) return; - rc = qdio_siga_output(q); - switch (rc) { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); + qdio_perf_stat_inc(&perf_stats.siga_out); + + cc = qdio_siga_output(q, &busy_bit); + switch (cc) { case 0: - /* TODO: improve error handling for CC=0 case */ -#ifdef CONFIG_QDIO_DEBUG - if (q->u.out.timestamp) { - QDIO_DBF_TEXT3(0, trace, "cc2reslv"); - sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, - q->nr, - atomic_read(&q->u.out.busy_siga_counter)); - QDIO_DBF_TEXT3(0, trace, dbf_text); - } -#endif /* CONFIG_QDIO_DEBUG */ - /* went smooth this time, reset timestamp */ - q->u.out.timestamp = 0; break; - /* cc=2 and busy bit */ - case (2 | QDIO_ERROR_SIGA_BUSY): - atomic_inc(&q->u.out.busy_siga_counter); - - /* if the last siga was successful, save timestamp here */ - if (!q->u.out.timestamp) - q->u.out.timestamp = get_usecs(); - - /* if we're in time, don't touch qdio_error */ - if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { - tasklet_schedule(&q->tasklet); - break; + case 2: + if (busy_bit) { + DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); + q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; + } else { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", + q->nr); + q->qdio_error = cc; } - QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, - atomic_read(&q->u.out.busy_siga_counter)); - QDIO_DBF_TEXT3(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - default: - /* for plain cc=1, 2 or 3 */ - q->qdio_error = rc; + break; + case 1: + case 3: + DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); + q->qdio_error = cc; + break; } } static void qdio_kick_outbound_handler(struct qdio_q *q) { int start, end, count; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif start = q->first_to_kick; end = q->last_move_ftc; @@ -807,13 +750,8 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) else count = end + QDIO_MAX_BUFFERS_PER_Q - start; -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "kickouth"); - QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); - - sprintf(dbf_text, "s=%2xc=%2x", start, count); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; @@ -828,22 +766,18 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) static void __qdio_outbound_processing(struct qdio_q *q) { - int siga_attempts; + unsigned long flags; qdio_perf_stat_inc(&perf_stats.tasklet_outbound); - - /* see comment in qdio_kick_outbound_q */ - siga_attempts = atomic_read(&q->u.out.busy_siga_counter); - while (siga_attempts--) { - atomic_dec(&q->u.out.busy_siga_counter); - qdio_kick_outbound_q(q); - } + spin_lock_irqsave(&q->lock, flags); BUG_ON(atomic_read(&q->nr_buf_used) < 0); if (qdio_outbound_q_moved(q)) qdio_kick_outbound_handler(q); + spin_unlock_irqrestore(&q->lock, flags); + if (queue_type(q) == QDIO_ZFCP_QFMT) { if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) tasklet_schedule(&q->tasklet); @@ -908,27 +842,18 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) static inline 
void qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) { -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0, trace, "newstate"); - sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); - QDIO_DBF_TEXT5(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); irq_ptr->state = state; mb(); } -static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) +static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) { - char dbf_text[15]; - if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text, "sens%4x", schid.sch_no); - QDIO_DBF_TEXT2(1, trace, dbf_text); - QDIO_DBF_HEX0(0, trace, irb, 64); - QDIO_DBF_HEX0(0, trace, irb->ecw, 64); + DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); + DBF_ERROR_HEX(irb, 64); + DBF_ERROR_HEX(irb->ecw, 64); } } @@ -962,14 +887,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; - char dbf_text[15]; - QDIO_DBF_TEXT2(1, trace, "ick2"); - sprintf(dbf_text, "%s", dev_name(&cdev->dev)); - QDIO_DBF_TEXT2(1, trace, dbf_text); - QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); - QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); - QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); + DBF_ERROR("intp :%lx", intparm); + DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); if (irq_ptr->nr_input_qs) { q = irq_ptr->input_qs[0]; @@ -1022,28 +943,29 @@ static void qdio_int_error(struct ccw_device *cdev) } static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, - int dstat) + int dstat) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { - QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); + DBF_ERROR("EQ:ck con"); goto error; } if (!(dstat & DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1, setup, "eq:no de"); + DBF_ERROR("EQ:no dev"); goto error; } if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1, setup, "eq:badio"); + DBF_ERROR("EQ: bad io"); goto error; } return 0; error: - QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); - QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); + DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -1052,12 +974,8 @@ static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; - char dbf_text[15]; - - sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - QDIO_DBF_TEXT0(0, trace, dbf_text); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); if (!qdio_establish_check_errors(cdev, cstat, dstat)) qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); } @@ -1068,25 +986,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; int cstat, dstat; - char dbf_text[15]; qdio_perf_stat_inc(&perf_stats.qdio_int); if (!intparm || !irq_ptr) { - sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT2(1, setup, dbf_text); + DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); return; } if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: - sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, setup, dbf_text); + DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); return; case -ETIMEDOUT: - sprintf(dbf_text, 
"qtoh%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, setup, dbf_text); + DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); qdio_int_error(cdev); return; default: @@ -1094,7 +1008,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } } - qdio_irq_check_sense(irq_ptr->schid, irb); + qdio_irq_check_sense(irq_ptr, irb); cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; @@ -1129,23 +1043,20 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, /** * qdio_get_ssqd_desc - get qdio subchannel description * @cdev: ccw device to get description for + * @data: where to store the ssqd * - * Returns a pointer to the saved qdio subchannel description, - * or NULL for not setup qdio devices. + * Returns 0 or an error code. The results of the chsc are stored in the + * specified structure. */ -struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) +int qdio_get_ssqd_desc(struct ccw_device *cdev, + struct qdio_ssqd_desc *data) { - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return NULL; + if (!cdev || !cdev->private) + return -EINVAL; - return &irq_ptr->ssqd_desc; + DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); + return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); @@ -1159,14 +1070,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); */ int qdio_cleanup(struct ccw_device *cdev, int how) { - struct qdio_irq *irq_ptr; - char dbf_text[15]; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; int rc; - sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; @@ -1199,18 +1105,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) */ int qdio_shutdown(struct ccw_device *cdev, int how) { - struct qdio_irq *irq_ptr; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; int rc; unsigned long flags; - char dbf_text[15]; - sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; + DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + mutex_lock(&irq_ptr->setup_mutex); /* * Subchannel was already shot down. 
We cannot prevent being called @@ -1234,10 +1137,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how) /* default behaviour is halt */ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); if (rc) { - sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - sprintf(dbf_text, "rc=%d", rc); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4d", rc); goto no_cleanup; } @@ -1271,17 +1172,18 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); */ int qdio_free(struct ccw_device *cdev) { - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); + struct qdio_irq *irq_ptr = cdev->private->qdio_data; - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; + DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); + + if (irq_ptr->debug_area != NULL) { + debug_unregister(irq_ptr->debug_area); + irq_ptr->debug_area = NULL; + } cdev->private->qdio_data = NULL; mutex_unlock(&irq_ptr->setup_mutex); @@ -1300,10 +1202,6 @@ EXPORT_SYMBOL_GPL(qdio_free); int qdio_initialize(struct qdio_initialize *init_data) { int rc; - char dbf_text[15]; - - sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); rc = qdio_allocate(init_data); if (rc) @@ -1323,10 +1221,8 @@ EXPORT_SYMBOL_GPL(qdio_initialize); int qdio_allocate(struct qdio_initialize *init_data) { struct qdio_irq *irq_ptr; - char dbf_text[15]; - sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); if ((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) @@ -1340,16 +1236,13 @@ int qdio_allocate(struct qdio_initialize *init_data) (!init_data->output_sbal_addr_array)) return -EINVAL; - qdio_allocate_do_dbf(init_data); - /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr) goto out_err; - QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); - QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); mutex_init(&irq_ptr->setup_mutex); + qdio_allocate_dbf(init_data, irq_ptr); /* * Allocate a page for the chsc calls in qdio_establish. 
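/*
 * Illustrative sketch (added by the editor, not part of the patch): the new
 * calling convention of qdio_get_ssqd_desc(). It no longer returns a pointer
 * into the saved per-irq copy (or NULL); instead the caller passes its own
 * struct qdio_ssqd_desc, the chsc is issued on its behalf, and 0 or a
 * negative errno comes back. The wrapper function below is hypothetical,
 * since the real qeth/zfcp call sites are not part of this hunk, and the
 * header providing the prototype is assumed to be <asm/qdio.h>.
 */
#include <asm/qdio.h>

static int example_query_ssqd(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc ssqd;
	int rc;

	/* returns -EINVAL for a NULL cdev or one without private data */
	rc = qdio_get_ssqd_desc(cdev, &ssqd);
	if (rc)
		return rc;

	/* descriptor fields such as ssqd.mmwc can now be inspected locally
	 * instead of dereferencing qdio-internal state */
	return 0;
}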
@@ -1367,9 +1260,6 @@ int qdio_allocate(struct qdio_initialize *init_data) goto out_rel; WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); - QDIO_DBF_TEXT0(0, setup, "qdr:"); - QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); - if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, init_data->no_output_qs)) goto out_rel; @@ -1390,14 +1280,12 @@ EXPORT_SYMBOL_GPL(qdio_allocate); */ int qdio_establish(struct qdio_initialize *init_data) { - char dbf_text[20]; struct qdio_irq *irq_ptr; struct ccw_device *cdev = init_data->cdev; unsigned long saveflags; int rc; - sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) @@ -1427,10 +1315,8 @@ int qdio_establish(struct qdio_initialize *init_data) rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); if (rc) { - sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, setup, dbf_text); - sprintf(dbf_text, "eq:rc%4x", rc); - QDIO_DBF_TEXT2(1, setup, dbf_text); + DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); } spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); @@ -1451,10 +1337,8 @@ int qdio_establish(struct qdio_initialize *init_data) } qdio_setup_ssqd_info(irq_ptr); - sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc); - QDIO_DBF_TEXT2(0, setup, dbf_text); - sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); - QDIO_DBF_TEXT2(0, setup, dbf_text); + DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); + DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); /* qebsm is now setup if available, initialize buffer states */ qdio_init_buf_states(irq_ptr); @@ -1475,10 +1359,8 @@ int qdio_activate(struct ccw_device *cdev) struct qdio_irq *irq_ptr; int rc; unsigned long saveflags; - char dbf_text[20]; - sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) @@ -1504,10 +1386,8 @@ int qdio_activate(struct ccw_device *cdev) rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); if (rc) { - sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, setup, dbf_text); - sprintf(dbf_text, "aq:rc%4x", rc); - QDIO_DBF_TEXT2(1, setup, dbf_text); + DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); } spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); @@ -1565,23 +1445,38 @@ static inline int buf_in_between(int bufnr, int start, int count) static void handle_inbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { - unsigned long flags; - int used, rc; + int used, cc, diff; - /* - * do_QDIO could run in parallel with the queue tasklet so the - * upper-layer programm could empty the ACK'ed buffer here. - * If that happens we must clear the polling flag, otherwise - * qdio_stop_polling() could set the buffer to NOT_INIT after - * it was set to EMPTY which would kill us. 
- */ - spin_lock_irqsave(&q->u.in.lock, flags); - if (q->u.in.polling) - if (buf_in_between(q->last_move_ftc, bufnr, count)) + if (!q->u.in.polling) + goto set; + + /* protect against stop polling setting an ACK for an emptied slsb */ + if (count == QDIO_MAX_BUFFERS_PER_Q) { + /* overwriting everything, just delete polling status */ + q->u.in.polling = 0; + q->u.in.ack_count = 0; + goto set; + } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { + if (is_qebsm(q)) { + /* partial overwrite, just update last_move_ftc */ + diff = add_buf(bufnr, count); + diff = sub_buf(diff, q->last_move_ftc); + q->u.in.ack_count -= diff; + if (q->u.in.ack_count <= 0) { + q->u.in.polling = 0; + q->u.in.ack_count = 0; + /* TODO: must we set last_move_ftc to something meaningful? */ + goto set; + } + q->last_move_ftc = add_buf(q->last_move_ftc, diff); + } + else + /* the only ACK will be deleted, so stop polling */ q->u.in.polling = 0; + } +set: count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); - spin_unlock_irqrestore(&q->u.in.lock, flags); used = atomic_add_return(count, &q->nr_buf_used) - count; BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); @@ -1591,9 +1486,9 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags, return; if (need_siga_in(q)) { - rc = qdio_siga_input(q); - if (rc) - q->qdio_error = rc; + cc = qdio_siga_input(q); + if (cc) + q->qdio_error = cc; } } @@ -1640,6 +1535,10 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, while (count--) qdio_kick_outbound_q(q); } + + /* report CC=2 conditions synchronously */ + if (q->qdio_error) + __qdio_outbound_processing(q); goto out; } @@ -1649,11 +1548,11 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, } /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state); + get_buf_state(q, prev_buf(bufnr), &state, 0); if (state != SLSB_CU_OUTPUT_PRIMED) qdio_kick_outbound_q(q); else { - QDIO_DBF_TEXT5(0, trace, "fast-req"); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); qdio_perf_stat_inc(&perf_stats.fast_requeue); } out: @@ -1673,12 +1572,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, int bufnr, int count) { struct qdio_irq *irq_ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[20]; - - sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); - QDIO_DBF_TEXT3(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || (count > QDIO_MAX_BUFFERS_PER_Q) || @@ -1692,33 +1585,24 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, if (!irq_ptr) return -ENODEV; -#ifdef CONFIG_QDIO_DEBUG if (callflags & QDIO_FLAG_SYNC_INPUT) - QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], - sizeof(void *)); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input"); else - QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], - sizeof(void *)); - - sprintf(dbf_text, "flag%04x", callflags); - QDIO_DBF_TEXT3(0, trace, dbf_text); - sprintf(dbf_text, "qi%02xct%02x", bufnr, count); - QDIO_DBF_TEXT3(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output"); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count); if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) return -EBUSY; if (callflags & QDIO_FLAG_SYNC_INPUT) - handle_inbound(irq_ptr->input_qs[q_nr], - callflags, bufnr, count); + handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, + count); else if (callflags & QDIO_FLAG_SYNC_OUTPUT) - 
handle_outbound(irq_ptr->output_qs[q_nr], - callflags, bufnr, count); - else { - QDIO_DBF_TEXT3(1, trace, "doQD:inv"); + handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, + count); + else return -EINVAL; - } return 0; } EXPORT_SYMBOL_GPL(do_QDIO); diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index ec5c4a41423..136d0f0b1e9 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c @@ -74,12 +74,20 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v) seq_printf(m, "\n"); seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", (long)atomic_long_read(&perf_stats.fast_requeue)); + seq_printf(m, "Number of outbound target full condition\t: %li\n", + (long)atomic_long_read(&perf_stats.outbound_target_full)); seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", (long)atomic_long_read(&perf_stats.debug_stop_polling)); seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); + seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.debug_eqbs_all), + (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); + seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.debug_sqbs_all), + (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); seq_printf(m, "\n"); return 0; } diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h index 5c406a8b738..7821ac4fa51 100644 --- a/drivers/s390/cio/qdio_perf.h +++ b/drivers/s390/cio/qdio_perf.h @@ -36,10 +36,15 @@ struct qdio_perf_stats { atomic_long_t inbound_handler; atomic_long_t outbound_handler; atomic_long_t fast_requeue; + atomic_long_t outbound_target_full; /* for debugging */ atomic_long_t debug_tl_out_timer; atomic_long_t debug_stop_polling; + atomic_long_t debug_eqbs_all; + atomic_long_t debug_eqbs_incomplete; + atomic_long_t debug_sqbs_all; + atomic_long_t debug_sqbs_incomplete; }; extern struct qdio_perf_stats perf_stats; diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a0b6b46e746..c08356b95bf 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -117,17 +117,16 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q->mask = 1 << (31 - i); q->nr = i; q->handler = handler; + spin_lock_init(&q->lock); } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, - void **sbals_array, char *dbf_text, int i) + void **sbals_array, int i) { struct qdio_q *prev; int j; - QDIO_DBF_TEXT0(0, setup, dbf_text); - QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); - + DBF_HEX(&q, sizeof(void *)); q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); /* fill in sbal */ @@ -150,31 +149,26 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) q->sl->element[j].sbal = (unsigned long)q->sbal[j]; - QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); - QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); - QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); - QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); + DBF_EVENT("sl-slsb-sbal"); + DBF_HEX(q->sl, sizeof(void *)); + DBF_HEX(&q->slsb, sizeof(void *)); + DBF_HEX(q->sbal, sizeof(void *)); } static void setup_queues(struct qdio_irq *irq_ptr, struct qdio_initialize *qdio_init) { - char 
dbf_text[20]; struct qdio_q *q; void **input_sbal_array = qdio_init->input_sbal_addr_array; void **output_sbal_array = qdio_init->output_sbal_addr_array; int i; - sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0, setup, dbf_text); - for_each_input_queue(irq_ptr, q, i) { - sprintf(dbf_text, "in-q%4x", i); + DBF_EVENT("in-q:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); q->is_input_q = 1; - spin_lock_init(&q->u.in.lock); - setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); + setup_storage_lists(q, irq_ptr, input_sbal_array, i); input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; if (is_thinint_irq(irq_ptr)) @@ -186,12 +180,11 @@ static void setup_queues(struct qdio_irq *irq_ptr, } for_each_output_queue(irq_ptr, q, i) { - sprintf(dbf_text, "outq%4x", i); + DBF_EVENT("outq:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); q->is_input_q = 0; - setup_storage_lists(q, irq_ptr, output_sbal_array, - dbf_text, i); + setup_storage_lists(q, irq_ptr, output_sbal_array, i); output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; tasklet_init(&q->tasklet, qdio_outbound_processing, @@ -222,8 +215,6 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, unsigned long token) { - char dbf_text[15]; - if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) goto no_qebsm; if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || @@ -232,33 +223,41 @@ static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, irq_ptr->sch_token = token; - QDIO_DBF_TEXT0(0, setup, "V=V:1"); - sprintf(dbf_text, "%8lx", irq_ptr->sch_token); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("V=V:1"); + DBF_EVENT("%8lx", irq_ptr->sch_token); return; no_qebsm: irq_ptr->sch_token = 0; irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; - QDIO_DBF_TEXT0(0, setup, "noV=V"); + DBF_EVENT("noV=V"); } -static int __get_ssqd_info(struct qdio_irq *irq_ptr) +/* + * If there is a qdio_irq we use the chsc_page and store the information + * in the qdio_irq, otherwise we copy it to the specified structure. 
+ */ +int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, + struct subchannel_id *schid, + struct qdio_ssqd_desc *data) { struct chsc_ssqd_area *ssqd; int rc; - QDIO_DBF_TEXT0(0, setup, "getssqd"); - ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + DBF_EVENT("getssqd:%4x", schid->sch_no); + if (irq_ptr != NULL) + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + else + ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); memset(ssqd, 0, PAGE_SIZE); ssqd->request = (struct chsc_header) { .length = 0x0010, .code = 0x0024, }; - ssqd->first_sch = irq_ptr->schid.sch_no; - ssqd->last_sch = irq_ptr->schid.sch_no; - ssqd->ssid = irq_ptr->schid.ssid; + ssqd->first_sch = schid->sch_no; + ssqd->last_sch = schid->sch_no; + ssqd->ssid = schid->ssid; if (chsc(ssqd)) return -EIO; @@ -268,27 +267,29 @@ static int __get_ssqd_info(struct qdio_irq *irq_ptr) if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || - (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) + (ssqd->qdio_ssqd.sch != schid->sch_no)) return -EINVAL; - memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, - sizeof(struct qdio_ssqd_desc)); + if (irq_ptr != NULL) + memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, + sizeof(struct qdio_ssqd_desc)); + else { + memcpy(data, &ssqd->qdio_ssqd, + sizeof(struct qdio_ssqd_desc)); + free_page((unsigned long)ssqd); + } return 0; } void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) { unsigned char qdioac; - char dbf_text[15]; int rc; - rc = __get_ssqd_info(irq_ptr); + rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); if (rc) { - QDIO_DBF_TEXT2(0, setup, "ssqdasig"); - sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(0, setup, dbf_text); - sprintf(dbf_text, "rc:%d", rc); - QDIO_DBF_TEXT2(0, setup, dbf_text); + DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%x", rc); /* all flags set, worst case */ qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | AC1_SIGA_SYNC_NEEDED; @@ -297,9 +298,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); process_ac_flags(irq_ptr, qdioac); - - sprintf(dbf_text, "qdioac%2x", qdioac); - QDIO_DBF_TEXT2(0, setup, dbf_text); + DBF_EVENT("qdioac:%4x", qdioac); } void qdio_release_memory(struct qdio_irq *irq_ptr) @@ -419,7 +418,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) /* get qdio commands */ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); if (!ciw) { - QDIO_DBF_TEXT2(1, setup, "no eq"); + DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); rc = -EINVAL; goto out_err; } @@ -427,7 +426,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); if (!ciw) { - QDIO_DBF_TEXT2(1, setup, "no aq"); + DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); rc = -EINVAL; goto out_err; } @@ -447,56 +446,38 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, { char s[80]; - sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); - switch (irq_ptr->qib.qfmt) { - case QDIO_QETH_QFMT: - sprintf(s + strlen(s), "OSA "); - break; - case QDIO_ZFCP_QFMT: - sprintf(s + strlen(s), "ZFCP "); - break; - case QDIO_IQDIO_QFMT: - sprintf(s + strlen(s), "HS "); - break; - } - sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); - sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); - sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 
1 : 0); - sprintf(s + strlen(s), "PCI:%d ", - (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); - sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); - sprintf(s + strlen(s), "SIGA:"); - sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); - sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); - sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); - sprintf(s + strlen(s), "%s", - (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); - sprintf(s + strlen(s), "%s", - (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); - sprintf(s + strlen(s), "%s", - (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); - sprintf(s + strlen(s), "\n"); + snprintf(s, 80, "qdio: %s %s on SC %x using " + "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", + dev_name(&cdev->dev), + (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : + ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), + irq_ptr->schid.sch_no, + is_thinint_irq(irq_ptr), + (irq_ptr->sch_token) ? 1 : 0, + (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, + css_general_characteristics.aif_tdd, + (irq_ptr->siga_flag.input) ? "R" : " ", + (irq_ptr->siga_flag.output) ? "W" : " ", + (irq_ptr->siga_flag.sync) ? "S" : " ", + (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", + (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", + (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); printk(KERN_INFO "%s", s); } int __init qdio_setup_init(void) { - char dbf_text[15]; - qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), 256, 0, NULL); if (!qdio_q_cache) return -ENOMEM; /* Check for OSA/FCP thin interrupts (bit 67). */ - sprintf(dbf_text, "thini%1x", - (css_general_characteristics.aif_osa) ? 1 : 0); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("thinint:%1d", + (css_general_characteristics.aif_osa) ? 1 : 0); /* Check for QEBSM support in general (bit 58). */ - sprintf(dbf_text, "cssQBS:%1x", - (qebsm_possible()) ? 1 : 0); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 
1 : 0); return 0; } diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index ea7f6140026..8e90e147b74 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -125,13 +125,13 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) static inline int tiqdio_inbound_q_done(struct qdio_q *q) { - unsigned char state; + unsigned char state = 0; if (!atomic_read(&q->nr_buf_used)) return 1; qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state); + get_buf_state(q, q->first_to_check, &state, 0); if (state == SLSB_P_INPUT_PRIMED) /* more work coming */ @@ -258,8 +258,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { struct scssc_area *scssc_area; - char dbf_text[15]; - void *ptr; int rc; scssc_area = (struct scssc_area *)irq_ptr->chsc_page; @@ -294,19 +292,15 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) rc = chsc_error_from_response(scssc_area->response.code); if (rc) { - sprintf(dbf_text, "sidR%4x", scssc_area->response.code); - QDIO_DBF_TEXT1(0, trace, dbf_text); - QDIO_DBF_TEXT1(0, setup, dbf_text); - ptr = &scssc_area->response; - QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); + DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, + scssc_area->response.code); + DBF_ERROR_HEX(&scssc_area->response, sizeof(void *)); return rc; } - QDIO_DBF_TEXT2(0, setup, "setscind"); - QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, - sizeof(unsigned long)); - QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, - sizeof(unsigned long)); + DBF_EVENT("setscind"); + DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long)); + DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long)); return 0; } @@ -327,14 +321,11 @@ void tiqdio_free_memory(void) int __init tiqdio_register_thinints(void) { - char dbf_text[20]; - isc_register(QDIO_AIRQ_ISC); tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, QDIO_AIRQ_ISC); if (IS_ERR(tiqdio_alsi)) { - sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); - QDIO_DBF_TEXT0(0, setup, dbf_text); + DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi)); tiqdio_alsi = NULL; isc_unregister(QDIO_AIRQ_ISC); return -ENOMEM; @@ -360,7 +351,7 @@ void qdio_setup_thinint(struct qdio_irq *irq_ptr) if (!is_thinint_irq(irq_ptr)) return; irq_ptr->dsci = get_indicator(); - QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); + DBF_HEX(&irq_ptr->dsci, sizeof(void *)); } void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e3fe6838293..1f5f5d2d87d 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -5,6 +5,7 @@ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * Felix Beck <felix.beck@de.ibm.com> * * Adjunct processor bus. * @@ -23,6 +24,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> @@ -34,6 +38,10 @@ #include <linux/mutex.h> #include <asm/s390_rdev.h> #include <asm/reset.h> +#include <asm/airq.h> +#include <asm/atomic.h> +#include <asm/system.h> +#include <asm/isc.h> #include <linux/hrtimer.h> #include <linux/ktime.h> @@ -46,6 +54,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); static int ap_poll_thread_start(void); static void ap_poll_thread_stop(void); static void ap_request_timeout(unsigned long); +static inline void ap_schedule_poll_timer(void); /* * Module description. @@ -68,7 +77,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000); MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); static struct device *ap_root_device = NULL; -static DEFINE_SPINLOCK(ap_device_lock); +static DEFINE_SPINLOCK(ap_device_list_lock); static LIST_HEAD(ap_device_list); /* @@ -80,19 +89,29 @@ static int ap_config_time = AP_CONFIG_TIME; static DECLARE_WORK(ap_config_work, ap_scan_bus); /* - * Tasklet & timer for AP request polling. + * Tasklet & timer for AP request polling and interrupts */ static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); static atomic_t ap_poll_requests = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread = NULL; static DEFINE_MUTEX(ap_poll_thread_mutex); +static void *ap_interrupt_indicator; static struct hrtimer ap_poll_timer; /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ static unsigned long long poll_timeout = 250000; /** + * ap_using_interrupts() - Returns non-zero if interrupt support is + * available. + */ +static inline int ap_using_interrupts(void) +{ + return ap_interrupt_indicator != NULL; +} + +/** * ap_intructions_available() - Test if AP instructions are available. * * Returns 0 if the AP instructions are installed. @@ -113,6 +132,23 @@ static inline int ap_instructions_available(void) } /** + * ap_interrupts_available(): Test if AP interrupts are available. + * + * Returns 1 if AP interrupts are available. + */ +static int ap_interrupts_available(void) +{ + unsigned long long facility_bits[2]; + + if (stfle(facility_bits, 2) <= 1) + return 0; + if (!(facility_bits[0] & (1ULL << 61)) || + !(facility_bits[1] & (1ULL << 62))) + return 0; + return 1; +} + +/** * ap_test_queue(): Test adjunct processor queue. * @qid: The AP queue number * @queue_depth: Pointer to queue depth value @@ -152,6 +188,80 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) return reg1; } +#ifdef CONFIG_64BIT +/** + * ap_queue_interruption_control(): Enable interruption for a specific AP. + * @qid: The AP queue number + * @ind: The notification indicator byte + * + * Returns AP queue status. + */ +static inline struct ap_queue_status +ap_queue_interruption_control(ap_qid_t qid, void *ind) +{ + register unsigned long reg0 asm ("0") = qid | 0x03000000UL; + register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; + register struct ap_queue_status reg1_out asm ("1"); + register void *reg2 asm ("2") = ind; + asm volatile( + ".long 0xb2af0000" /* PQAP(RAPQ) */ + : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) + : + : "cc" ); + return reg1_out; +} +#endif + +/** + * ap_queue_enable_interruption(): Enable interruption on an AP. 
+ * @qid: The AP queue number + * @ind: the notification indicator byte + * + * Enables interruption on AP queue via ap_queue_interruption_control(). Based + * on the return value it waits a while and tests the AP queue if interrupts + * have been switched on using ap_test_queue(). + */ +static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) +{ +#ifdef CONFIG_64BIT + struct ap_queue_status status; + int t_depth, t_device_type, rc, i; + + rc = -EBUSY; + status = ap_queue_interruption_control(qid, ind); + + for (i = 0; i < AP_MAX_RESET; i++) { + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (status.int_enabled) + return 0; + break; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + break; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_INVALID_ADDRESS: + return -ENODEV; + case AP_RESPONSE_OTHERWISE_CHANGED: + if (status.int_enabled) + return 0; + break; + default: + break; + } + if (i < AP_MAX_RESET - 1) { + udelay(5); + status = ap_test_queue(qid, &t_depth, &t_device_type); + } + } + return rc; +#else + return -EINVAL; +#endif +} + /** * __ap_send(): Send message to adjunct processor queue. * @qid: The AP queue number @@ -295,6 +405,11 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) case AP_RESPONSE_CHECKSTOPPED: rc = -ENODEV; break; + case AP_RESPONSE_INVALID_ADDRESS: + rc = -ENODEV; + break; + case AP_RESPONSE_OTHERWISE_CHANGED: + break; case AP_RESPONSE_BUSY: break; default: @@ -345,6 +460,15 @@ static int ap_init_queue(ap_qid_t qid) status = ap_test_queue(qid, &dummy, &dummy); } } + if (rc == 0 && ap_using_interrupts()) { + rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator); + /* If interruption mode is supported by the machine, + * but an AP can not be enabled for interruption then + * the AP will be discarded. */ + if (rc) + pr_err("Registering adapter interrupts for " + "AP %d failed\n", AP_QID_DEVICE(qid)); + } return rc; } @@ -397,16 +521,16 @@ static ssize_t ap_hwtype_show(struct device *dev, struct ap_device *ap_dev = to_ap_dev(dev); return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); } -static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); +static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ap_device *ap_dev = to_ap_dev(dev); return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); } -static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); +static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); static ssize_t ap_request_count_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -509,9 +633,9 @@ static int ap_device_probe(struct device *dev) ap_dev->drv = ap_drv; rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; if (!rc) { - spin_lock_bh(&ap_device_lock); + spin_lock_bh(&ap_device_list_lock); list_add(&ap_dev->list, &ap_device_list); - spin_unlock_bh(&ap_device_lock); + spin_unlock_bh(&ap_device_list_lock); } return rc; } @@ -553,9 +677,9 @@ static int ap_device_remove(struct device *dev) ap_flush_queue(ap_dev); del_timer_sync(&ap_dev->timeout); - spin_lock_bh(&ap_device_lock); + spin_lock_bh(&ap_device_list_lock); list_del_init(&ap_dev->list); - spin_unlock_bh(&ap_device_lock); + spin_unlock_bh(&ap_device_list_lock); if (ap_drv->remove) ap_drv->remove(ap_dev); spin_lock_bh(&ap_dev->lock); @@ -599,6 +723,14 @@ static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); } +static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + ap_using_interrupts() ? 1 : 0); +} + +static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); + static ssize_t ap_config_time_store(struct bus_type *bus, const char *buf, size_t count) { @@ -653,7 +785,8 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, ktime_t hr_time; /* 120 seconds = maximum poll interval */ - if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) + if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || + time > 120000000000ULL) return -EINVAL; poll_timeout = time; hr_time = ktime_set(0, poll_timeout); @@ -672,6 +805,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { &bus_attr_ap_domain, &bus_attr_config_time, &bus_attr_poll_thread, + &bus_attr_ap_interrupts, &bus_attr_poll_timeout, NULL, }; @@ -814,6 +948,11 @@ out: return rc; } +static void ap_interrupt_handler(void *unused1, void *unused2) +{ + tasklet_schedule(&ap_tasklet); +} + /** * __ap_scan_bus(): Scan the AP bus. * @dev: Pointer to device @@ -928,6 +1067,8 @@ ap_config_timeout(unsigned long ptr) */ static inline void ap_schedule_poll_timer(void) { + if (ap_using_interrupts()) + return; if (hrtimer_is_queued(&ap_poll_timer)) return; hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), @@ -1181,7 +1322,7 @@ static void ap_reset(struct ap_device *ap_dev) ap_dev->unregistered = 1; } -static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) +static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) { spin_lock(&ap_dev->lock); if (!ap_dev->unregistered) { @@ -1207,13 +1348,19 @@ static void ap_poll_all(unsigned long dummy) unsigned long flags; struct ap_device *ap_dev; + /* Reset the indicator if interrupts are used. Thus new interrupts can + * be received. Doing it in the beginning of the tasklet is therefor + * important that no requests on any AP get lost. 
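
Two details of the new interrupt mode show up around this hunk: ap_schedule_poll_timer() now bails out early when interrupts are available, and the tasklet comment just above (together with the xchg() that follows it) explains that ap_poll_all() clears the interrupt indicator before it walks the device list, so an interrupt arriving mid-scan sets the indicator again and schedules another pass instead of being lost. A rough user-space model of that clear-before-scan ordering; the indicator variable and the scan function are illustrative, and a C11 atomic exchange stands in for the s390 xchg():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int indicator = 1;        /* set by the (simulated) interrupt */

static void poll_all_devices(void)
{
        printf("scanning AP devices\n");
}

int main(void)
{
        /* Clear the indicator *before* scanning; anything that fires while
         * we scan sets it again and the loop runs one more pass. */
        while (atomic_exchange(&indicator, 0))
                poll_all_devices();
        return 0;
}
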
+ */ + if (ap_using_interrupts()) + xchg((u8 *)ap_interrupt_indicator, 0); do { flags = 0; - spin_lock(&ap_device_lock); + spin_lock(&ap_device_list_lock); list_for_each_entry(ap_dev, &ap_device_list, list) { - __ap_poll_all(ap_dev, &flags); + __ap_poll_device(ap_dev, &flags); } - spin_unlock(&ap_device_lock); + spin_unlock(&ap_device_list_lock); } while (flags & 1); if (flags & 2) ap_schedule_poll_timer(); @@ -1253,11 +1400,11 @@ static int ap_poll_thread(void *data) remove_wait_queue(&ap_poll_wait, &wait); flags = 0; - spin_lock_bh(&ap_device_lock); + spin_lock_bh(&ap_device_list_lock); list_for_each_entry(ap_dev, &ap_device_list, list) { - __ap_poll_all(ap_dev, &flags); + __ap_poll_device(ap_dev, &flags); } - spin_unlock_bh(&ap_device_lock); + spin_unlock_bh(&ap_device_list_lock); } set_current_state(TASK_RUNNING); remove_wait_queue(&ap_poll_wait, &wait); @@ -1268,6 +1415,8 @@ static int ap_poll_thread_start(void) { int rc; + if (ap_using_interrupts()) + return 0; mutex_lock(&ap_poll_thread_mutex); if (!ap_poll_kthread) { ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); @@ -1301,8 +1450,12 @@ static void ap_request_timeout(unsigned long data) { struct ap_device *ap_dev = (struct ap_device *) data; - if (ap_dev->reset == AP_RESET_ARMED) + if (ap_dev->reset == AP_RESET_ARMED) { ap_dev->reset = AP_RESET_DO; + + if (ap_using_interrupts()) + tasklet_schedule(&ap_tasklet); + } } static void ap_reset_domain(void) @@ -1337,14 +1490,25 @@ int __init ap_module_init(void) int rc, i; if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { - printk(KERN_WARNING "Invalid param: domain = %d. " - " Not loading.\n", ap_domain_index); + pr_warning("%d is not a valid cryptographic domain\n", + ap_domain_index); return -EINVAL; } if (ap_instructions_available() != 0) { - printk(KERN_WARNING "AP instructions not installed.\n"); + pr_warning("The hardware system does not support " + "AP instructions\n"); return -ENODEV; } + if (ap_interrupts_available()) { + isc_register(AP_ISC); + ap_interrupt_indicator = s390_register_adapter_interrupt( + &ap_interrupt_handler, NULL, AP_ISC); + if (IS_ERR(ap_interrupt_indicator)) { + ap_interrupt_indicator = NULL; + isc_unregister(AP_ISC); + } + } + register_reset_call(&ap_reset_call); /* Create /sys/bus/ap. */ @@ -1408,6 +1572,10 @@ out_bus: bus_unregister(&ap_bus_type); out: unregister_reset_call(&ap_reset_call); + if (ap_using_interrupts()) { + s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); + isc_unregister(AP_ISC); + } return rc; } @@ -1443,6 +1611,10 @@ void ap_module_exit(void) bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); bus_unregister(&ap_bus_type); unregister_reset_call(&ap_reset_call); + if (ap_using_interrupts()) { + s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); + isc_unregister(AP_ISC); + } } #ifndef CONFIG_ZCRYPT_MONOLITHIC diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 446378b308f..a3536224180 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -5,6 +5,7 @@ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * Felix Beck <felix.beck@de.ibm.com> * * Adjunct processor bus header file. 
* @@ -67,7 +68,8 @@ struct ap_queue_status { unsigned int queue_empty : 1; unsigned int replies_waiting : 1; unsigned int queue_full : 1; - unsigned int pad1 : 5; + unsigned int pad1 : 4; + unsigned int int_enabled : 1; unsigned int response_code : 8; unsigned int pad2 : 16; }; @@ -78,6 +80,8 @@ struct ap_queue_status { #define AP_RESPONSE_DECONFIGURED 0x03 #define AP_RESPONSE_CHECKSTOPPED 0x04 #define AP_RESPONSE_BUSY 0x05 +#define AP_RESPONSE_INVALID_ADDRESS 0x06 +#define AP_RESPONSE_OTHERWISE_CHANGED 0x07 #define AP_RESPONSE_Q_FULL 0x10 #define AP_RESPONSE_NO_PENDING_REPLY 0x10 #define AP_RESPONSE_INDEX_TOO_BIG 0x11 diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 54f4cbc3be9..326ea08f67c 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -264,17 +264,21 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct type80_hdr *t80h = reply->message; + struct type80_hdr *t80h; int length; /* Copy the reply message to the request message buffer. */ - if (IS_ERR(reply)) + if (IS_ERR(reply)) { memcpy(msg->message, &error_reply, sizeof(error_reply)); - else if (t80h->type == TYPE80_RSP_CODE) { + goto out; + } + t80h = reply->message; + if (t80h->type == TYPE80_RSP_CODE) { length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); memcpy(msg->message, reply->message, length); } else memcpy(msg->message, reply->message, sizeof error_reply); +out: complete((struct completion *) msg->private); } diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 12da4815ba8..17ba81b58c7 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -247,17 +247,21 @@ static void zcrypt_pcica_receive(struct ap_device *ap_dev, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct type84_hdr *t84h = reply->message; + struct type84_hdr *t84h; int length; /* Copy the reply message to the request message buffer. */ - if (IS_ERR(reply)) + if (IS_ERR(reply)) { memcpy(msg->message, &error_reply, sizeof(error_reply)); - else if (t84h->code == TYPE84_RSP_CODE) { + goto out; + } + t84h = reply->message; + if (t84h->code == TYPE84_RSP_CODE) { length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); memcpy(msg->message, reply->message, length); } else memcpy(msg->message, reply->message, sizeof error_reply); +out: complete((struct completion *) msg->private); } diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 779952cb19f..f4b0c479543 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -447,19 +447,23 @@ static void zcrypt_pcicc_receive(struct ap_device *ap_dev, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct type86_reply *t86r = reply->message; + struct type86_reply *t86r; int length; /* Copy the reply message to the request message buffer. 
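
The zcrypt receive callbacks in this and the next few hunks (cex2a, pcica, pcicc, pcixcc) all get the same fix: the reply argument may be an error value encoded as a pointer, so the header pointer (t80h/t84h/t86r) is now assigned only after the IS_ERR() check instead of being initialized from reply->message up front. A small user-space model of why that ordering matters; the error-pointer helpers here are simplified stand-ins for the kernel's:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct reply {
        char message[64];
};

static void receive(struct reply *reply, char *out, size_t len)
{
        /* Check for an error pointer before touching reply->message;
         * dereferencing first, as the old code did, reads a bogus address. */
        if (IS_ERR(reply)) {
                snprintf(out, len, "machine failure");
                return;
        }
        snprintf(out, len, "%s", reply->message);
}

int main(void)
{
        struct reply ok = { .message = "type 80 response" };
        char buf[64];

        receive(&ok, buf, sizeof(buf));
        printf("good reply: %s\n", buf);

        receive(ERR_PTR(-EIO), buf, sizeof(buf));
        printf("error path: %s\n", buf);
        return 0;
}
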
*/ - if (IS_ERR(reply)) + if (IS_ERR(reply)) { memcpy(msg->message, &error_reply, sizeof(error_reply)); - else if (t86r->hdr.type == TYPE86_RSP_CODE && + goto out; + } + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && t86r->cprb.cprb_ver_id == 0x01) { length = sizeof(struct type86_reply) + t86r->length - 2; length = min(PCICC_MAX_RESPONSE_SIZE, length); memcpy(msg->message, reply->message, length); } else memcpy(msg->message, reply->message, sizeof error_reply); +out: complete((struct completion *) msg->private); } diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index d8ad36f8154..e7a1e22e77a 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -635,13 +635,16 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, }; struct response_type *resp_type = (struct response_type *) msg->private; - struct type86x_reply *t86r = reply->message; + struct type86x_reply *t86r; int length; /* Copy the reply message to the request message buffer. */ - if (IS_ERR(reply)) + if (IS_ERR(reply)) { memcpy(msg->message, &error_reply, sizeof(error_reply)); - else if (t86r->hdr.type == TYPE86_RSP_CODE && + goto out; + } + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && t86r->cprbx.cprb_ver_id == 0x02) { switch (resp_type->type) { case PCIXCC_RESPONSE_TYPE_ICA: @@ -660,6 +663,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, } } else memcpy(msg->message, reply->message, sizeof error_reply); +out: complete(&(resp_type->work)); } diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 42776550acf..f29c7086fc1 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -13,6 +13,9 @@ #undef DEBUGDATA #undef DEBUGCCW +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -190,21 +193,22 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) { CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, - "%s(%s): %s: %04x\n", - CTCM_FUNTAIL, ch->id, msg, rc); + "%s(%s): %s: %04x\n", + CTCM_FUNTAIL, ch->id, msg, rc); switch (rc) { case -EBUSY: - ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); + pr_info("%s: The communication peer is busy\n", + ch->id); fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); break; case -ENODEV: - ctcm_pr_emerg("%s (%s): Invalid device called for IO\n", - ch->id, msg); + pr_err("%s: The specified target device is not valid\n", + ch->id); fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); break; default: - ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n", - ch->id, msg, rc); + pr_err("An I/O operation resulted in error %04x\n", + rc); fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); } } @@ -886,8 +890,15 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_RXERR); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } - } else - ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name); + } else { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); + } } /** @@ -969,7 +980,9 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, ctc_ch_event_names[event], 
fsm_getstate_str(fi)); - ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); } } @@ -2101,14 +2114,11 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) CTCMY_DBF_DEV_NAME(TRACE, dev, ""); if (IS_MPC(priv)) { - ctcm_pr_info("ctcm: %s Restarting Device and " - "MPC Group in 5 seconds\n", - dev->name); restart_timer = CTCM_TIME_1_SEC; } else { - ctcm_pr_info("%s: Restarting\n", dev->name); restart_timer = CTCM_TIME_5_SEC; } + dev_info(&dev->dev, "Restarting device\n"); dev_action_stop(fi, event, arg); fsm_event(priv->fsm, DEV_EVENT_STOP, dev); @@ -2150,16 +2160,16 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) case DEV_STATE_STARTWAIT_RX: if (event == DEV_EVENT_RXUP) { fsm_newstate(fi, DEV_STATE_RUNNING); - ctcm_pr_info("%s: connected with remote side\n", - dev->name); + dev_info(&dev->dev, + "Connected with remote side\n"); ctcm_clear_busy(dev); } break; case DEV_STATE_STARTWAIT_TX: if (event == DEV_EVENT_TXUP) { fsm_newstate(fi, DEV_STATE_RUNNING); - ctcm_pr_info("%s: connected with remote side\n", - dev->name); + dev_info(&dev->dev, + "Connected with remote side\n"); ctcm_clear_busy(dev); } break; diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index a4e29836a2a..2678573bece 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -21,6 +21,9 @@ #undef DEBUGDATA #undef DEBUGCCW +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -281,14 +284,16 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) switch (PTR_ERR(irb)) { case -EIO: - ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev)); + dev_err(&cdev->dev, + "An I/O-error occurred on the CTCM device\n"); break; case -ETIMEDOUT: - ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev)); + dev_err(&cdev->dev, + "An adapter hardware operation timed out\n"); break; default: - ctcm_pr_warn("unknown error %ld on device %s\n", - PTR_ERR(irb), dev_name(&cdev->dev)); + dev_err(&cdev->dev, + "An error occurred on the adapter hardware\n"); } return PTR_ERR(irb); } @@ -309,15 +314,17 @@ static inline void ccw_unit_check(struct channel *ch, __u8 sense) if (sense & SNS0_INTERVENTION_REQ) { if (sense & 0x01) { if (ch->sense_rc != 0x01) { - ctcm_pr_debug("%s: Interface disc. or Sel. " - "reset (remote)\n", ch->id); + pr_notice( + "%s: The communication peer has " + "disconnected\n", ch->id); ch->sense_rc = 0x01; } fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); } else { if (ch->sense_rc != SNS0_INTERVENTION_REQ) { - ctcm_pr_debug("%s: System reset (remote)\n", - ch->id); + pr_notice( + "%s: The remote operating system is " + "not available\n", ch->id); ch->sense_rc = SNS0_INTERVENTION_REQ; } fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); @@ -1194,8 +1201,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, /* Check for unsolicited interrupts. 
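
Note how ccw_unit_check() keeps the existing ch->sense_rc guard while its messages move to pr_notice(): a given sense condition is reported when it first appears rather than on every interrupt, matching the "print it only once" intent retained in the interrupt handler. A tiny user-space sketch of that report-on-change idea; the sense values in the sample array are made up:

#include <stdio.h>

static unsigned char last_sense;

static void report_sense(unsigned char sense)
{
        /* Report a condition only when it changes, not on every interrupt. */
        if (sense == last_sense)
                return;
        last_sense = sense;
        printf("new sense condition: 0x%02x\n", sense);
}

int main(void)
{
        unsigned char samples[] = { 0x01, 0x01, 0x01, 0x40, 0x40, 0x01 };

        for (unsigned int i = 0; i < sizeof(samples); i++)
                report_sense(samples[i]);
        return 0;
}
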
*/ if (cgdev == NULL) { - ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n", - cstat, dstat); + CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, + "%s(%s) unsolicited irq: c-%02x d-%02x\n", + CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); + dev_warn(&cdev->dev, + "The adapter received a non-specific IRQ\n"); return; } @@ -1207,31 +1217,34 @@ static void ctcm_irq_handler(struct ccw_device *cdev, else if (priv->channel[WRITE]->cdev == cdev) ch = priv->channel[WRITE]; else { - ctcm_pr_err("ctcm: Can't determine channel for interrupt, " - "device %s\n", dev_name(&cdev->dev)); + dev_err(&cdev->dev, + "%s: Internal error: Can't determine channel for " + "interrupt device %s\n", + __func__, dev_name(&cdev->dev)); + /* Explain: inconsistent internal structures */ return; } dev = ch->netdev; if (dev == NULL) { - ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", - __func__, dev_name(&cdev->dev), ch); + dev_err(&cdev->dev, + "%s Internal error: net_device is NULL, ch = 0x%p\n", + __func__, ch); + /* Explain: inconsistent internal structures */ return; } - CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, - "%s(%s): int. for %s: cstat=%02x dstat=%02x", - CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); - /* Copy interruption response block. */ memcpy(ch->irb, irb, sizeof(struct irb)); + /* Issue error message and return on subchannel error code */ if (irb->scsw.cmd.cstat) { - /* Check for good subchannel return code, otherwise error message */ fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); - ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", - dev->name, ch->id, irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): sub-ch check %s: cs=%02x ds=%02x", + CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); + dev_warn(&cdev->dev, + "A check occurred on the subchannel\n"); return; } @@ -1239,7 +1252,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev, if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { if ((irb->ecw[0] & ch->sense_rc) == 0) /* print it only once */ - CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, "%s(%s): sense=%02x, ds=%02x", CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); ccw_unit_check(ch, irb->ecw[0]); @@ -1574,6 +1587,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); + dev_info(&dev->dev, + "setup OK : r/w = %s/%s, protocol : %d\n", + priv->channel[READ]->id, + priv->channel[WRITE]->id, priv->protocol); + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, priv->channel[READ]->id, @@ -1687,7 +1705,7 @@ static void __exit ctcm_exit(void) { unregister_cu3088_discipline(&ctcm_group_driver); ctcm_unregister_dbf_views(); - ctcm_pr_info("CTCM driver unloaded\n"); + pr_info("CTCM driver unloaded\n"); } /* @@ -1695,7 +1713,7 @@ static void __exit ctcm_exit(void) */ static void print_banner(void) { - printk(KERN_INFO "CTCM driver initialized\n"); + pr_info("CTCM driver initialized\n"); } /** @@ -1717,8 +1735,8 @@ static int __init ctcm_init(void) ret = register_cu3088_discipline(&ctcm_group_driver); if (ret) { ctcm_unregister_dbf_views(); - ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline " - "(rc = %d)\n", ret); + pr_err("%s / register_cu3088_discipline failed, ret = %d\n", + __func__, ret); return ret; } print_banner(); diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d77cce3fe4d..d925e732b7d 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h @@ 
-41,12 +41,6 @@ #define LOG_FLAG_NOMEM 8 #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) -#define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg) -#define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg) -#define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) -#define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg) -#define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) -#define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) #define CTCM_PR_DEBUG(fmt, arg...) \ do { \ diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 19f5d5ed85e..3db5f846bbf 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -19,6 +19,9 @@ #undef DEBUGDATA #undef DEBUGCCW +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -386,7 +389,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) if (grp->allocchan_callback_retries < 4) { if (grp->allochanfunc) grp->allochanfunc(grp->port_num, - grp->group_max_buflen); + grp->group_max_buflen); } else { /* there are problems...bail out */ /* there may be a state mismatch so restart */ @@ -1232,8 +1235,9 @@ done: dev_kfree_skb_any(pskb); if (sendrc == NET_RX_DROP) { - printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" - " - PACKET DROPPED\n", dev->name, __func__); + dev_warn(&dev->dev, + "The network backlog for %s is exceeded, " + "package dropped\n", __func__); fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); } @@ -1670,10 +1674,11 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) CTCM_FUNTAIL, ch->id); } } - done: if (rc) { - ctcm_pr_info("ctcmpc : %s() failed\n", __func__); + dev_warn(&dev->dev, + "The XID used in the MPC protocol is not valid, " + "rc = %d\n", rc); priv->xid->xid2_flag2 = 0x40; grp->saved_xid2->xid2_flag2 = 0x40; } diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index bb2d13721d3..8452bb052d6 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c @@ -10,6 +10,9 @@ #undef DEBUGDATA #undef DEBUGCCW +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/sysfs.h> #include "ctcm_main.h" diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index c7a036a5d7a..acca6678cb2 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -26,6 +26,9 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "lcs" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/if.h> #include <linux/netdevice.h> @@ -54,8 +57,6 @@ #error Cannot compile lcs.c without some net devices switched on. 
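
The hunks around this point carry the same message-cleanup pattern through ctcm and lcs: the private ctcm_pr_* wrappers and lcs' PRINTK_HEADER disappear, device-specific messages become dev_err()/dev_warn() calls (the driver core prepends the device identification, roughly "<driver> <bus id>: ..."), and module-level messages become pr_*() calls prefixed via KMSG_COMPONENT. A rough user-space model of the two flavours; struct device, the bus id, and the sample rc value are illustrative only:

#include <stdio.h>

#define KMSG_COMPONENT "lcs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

/* Very rough model of dev_err(): the real driver core adds the driver
 * name and bus id of the device that the message belongs to. */
struct device {
        const char *driver;
        const char *bus_id;
};

#define dev_err(dev, fmt, ...) \
        fprintf(stderr, "%s %s: " fmt, (dev)->driver, (dev)->bus_id, ##__VA_ARGS__)

int main(void)
{
        struct device ccwdev = { .driver = "lcs", .bus_id = "0.0.f5f0" };

        pr_err("Initialization failed\n");                      /* module scope */
        dev_err(&ccwdev,
                "Starting an LCS device resulted in an error, rc=%d!\n",
                -19);                                           /* device scope */
        return 0;
}
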
#endif -#define PRINTK_HEADER " lcs: " - /** * initialization string for output */ @@ -96,7 +97,7 @@ lcs_register_debug_facility(void) lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { - PRINT_ERR("Not enough memory for debug facility.\n"); + pr_err("Not enough memory for debug facility.\n"); lcs_unregister_debug_facility(); return -ENOMEM; } @@ -503,7 +504,9 @@ lcs_start_channel(struct lcs_channel *channel) if (rc) { LCS_DBF_TEXT_(4,trace,"essh%s", dev_name(&channel->ccwdev->dev)); - PRINT_ERR("Error in starting channel, rc=%d!\n", rc); + dev_err(&channel->ccwdev->dev, + "Starting an LCS device resulted in an error," + " rc=%d!\n", rc); } return rc; } @@ -640,7 +643,9 @@ __lcs_resume_channel(struct lcs_channel *channel) if (rc) { LCS_DBF_TEXT_(4, trace, "ersc%s", dev_name(&channel->ccwdev->dev)); - PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); + dev_err(&channel->ccwdev->dev, + "Sending data from the LCS device to the LAN failed" + " with rc=%d\n",rc); } else channel->state = LCS_CH_STATE_RUNNING; return rc; @@ -1086,7 +1091,7 @@ lcs_check_multicast_support(struct lcs_card *card) cmd->cmd.lcs_qipassist.num_ip_pairs = 1; rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); if (rc != 0) { - PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); + pr_err("Query IPAssist failed. Assuming unsupported!\n"); return -EOPNOTSUPP; } if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) @@ -1119,8 +1124,8 @@ list_modified: rc = lcs_send_setipm(card, ipm); spin_lock_irqsave(&card->ipm_lock, flags); if (rc) { - PRINT_INFO("Adding multicast address failed. " - "Table possibly full!\n"); + pr_info("Adding multicast address failed." 
+ " Table possibly full!\n"); /* store ipm in failed list -> will be added * to ipm_list again, so a retry will be done * during the next call of this function */ @@ -1231,8 +1236,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) ipm = (struct lcs_ipm_list *) kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); if (ipm == NULL) { - PRINT_INFO("Not enough memory to add " - "new multicast entry!\n"); + pr_info("Not enough memory to add" + " new multicast entry!\n"); break; } memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); @@ -1306,18 +1311,21 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) switch (PTR_ERR(irb)) { case -EIO: - PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); + dev_warn(&cdev->dev, + "An I/O-error occurred on the LCS device\n"); LCS_DBF_TEXT(2, trace, "ckirberr"); LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); break; case -ETIMEDOUT: - PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); + dev_warn(&cdev->dev, + "A command timed out on the LCS device\n"); LCS_DBF_TEXT(2, trace, "ckirberr"); LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); break; default: - PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), - dev_name(&cdev->dev)); + dev_warn(&cdev->dev, + "An error occurred on the LCS device, rc=%ld\n", + PTR_ERR(irb)); LCS_DBF_TEXT(2, trace, "ckirberr"); LCS_DBF_TEXT(2, trace, " rc???"); } @@ -1403,8 +1411,10 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* Check for channel and device errors presented */ rc = lcs_get_problem(cdev, irb); if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { - PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", - dev_name(&cdev->dev), dstat, cstat); + dev_warn(&cdev->dev, + "The LCS device stopped because of an error," + " dstat=0x%X, cstat=0x%X \n", + dstat, cstat); if (rc) { channel->state = LCS_CH_STATE_ERROR; } @@ -1761,8 +1771,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) lcs_schedule_recovery(card); break; case LCS_CMD_STOPLAN: - PRINT_WARN("Stoplan for %s initiated by LGW.\n", - card->dev->name); + pr_warning("Stoplan for %s initiated by LGW.\n", + card->dev->name); if (card->dev) netif_carrier_off(card->dev); break; @@ -1790,7 +1800,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) skb = dev_alloc_skb(skb_len); if (skb == NULL) { - PRINT_ERR("LCS: alloc_skb failed for device=%s\n", + dev_err(&card->dev->dev, + " Allocating a socket buffer to interface %s failed\n", card->dev->name); card->stats.rx_dropped++; return; @@ -1886,7 +1897,8 @@ lcs_stop_device(struct net_device *dev) (card->write.state != LCS_CH_STATE_RUNNING)); rc = lcs_stopcard(card); if (rc) - PRINT_ERR("Try it again!\n "); + dev_err(&card->dev->dev, + " Shutting down the LCS device failed\n "); return rc; } @@ -1905,7 +1917,7 @@ lcs_open_device(struct net_device *dev) /* initialize statistics */ rc = lcs_detect(card); if (rc) { - PRINT_ERR("LCS:Error in opening device!\n"); + pr_err("Error in opening device!\n"); } else { dev->flags |= IFF_UP; @@ -2113,8 +2125,9 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) rc = lcs_detect(card); if (rc) { LCS_DBF_TEXT(2, setup, "dtctfail"); - PRINT_WARN("Detection of LCS card failed with return code " - "%d (0x%x)\n", rc, rc); + dev_err(&card->dev->dev, + "Detecting a network adapter for LCS devices" + " failed with rc=%d (0x%x)\n", rc, rc); lcs_stopcard(card); goto out; } @@ -2144,7 +2157,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) #endif default: LCS_DBF_TEXT(3, setup, "errinit"); - 
PRINT_ERR("LCS: Initialization failed\n"); + pr_err(" Initialization failed\n"); goto out; } if (!dev) @@ -2176,13 +2189,13 @@ netdev_out: goto out; /* Print out supported assists: IPv6 */ - PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? - "with" : "without"); + pr_info("LCS device %s %s IPv6 support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? + "with" : "without"); /* Print out supported assist: Multicast */ - PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? - "with" : "without"); + pr_info("LCS device %s %s Multicast support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? + "with" : "without"); return 0; out: @@ -2248,15 +2261,16 @@ lcs_recovery(void *ptr) return 0; LCS_DBF_TEXT(4, trace, "recover2"); gdev = card->gdev; - PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev)); + dev_warn(&gdev->dev, + "A recovery process has been started for the LCS device\n"); rc = __lcs_shutdown_device(gdev, 1); rc = lcs_new_device(gdev); if (!rc) - PRINT_INFO("Device %s successfully recovered!\n", - card->dev->name); + pr_info("Device %s successfully recovered!\n", + card->dev->name); else - PRINT_INFO("Device %s could not be recovered!\n", - card->dev->name); + pr_info("Device %s could not be recovered!\n", + card->dev->name); lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); return 0; } @@ -2308,17 +2322,17 @@ __init lcs_init_module(void) { int rc; - PRINT_INFO("Loading %s\n",version); + pr_info("Loading %s\n", version); rc = lcs_register_debug_facility(); LCS_DBF_TEXT(0, setup, "lcsinit"); if (rc) { - PRINT_ERR("Initialization failed\n"); + pr_err("Initialization failed\n"); return rc; } rc = register_cu3088_discipline(&lcs_group_driver); if (rc) { - PRINT_ERR("Initialization failed\n"); + pr_err("Initialization failed\n"); return rc; } return 0; @@ -2331,7 +2345,7 @@ __init lcs_init_module(void) static void __exit lcs_cleanup_module(void) { - PRINT_INFO("Terminating lcs module.\n"); + pr_info("Terminating lcs module.\n"); LCS_DBF_TEXT(0, trace, "cleanup"); unregister_cu3088_discipline(&lcs_group_driver); lcs_unregister_debug_facility(); diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 0fea51e34b5..930e2fc2a01 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -31,6 +31,9 @@ * */ +#define KMSG_COMPONENT "netiucv" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #undef DEBUG #include <linux/module.h> @@ -846,7 +849,8 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) fsm_deltimer(&conn->timer); iucv_path_sever(conn->path, NULL); - PRINT_INFO("%s: Remote dropped connection\n", netdev->name); + dev_info(privptr->dev, "The peer interface of the IUCV device" + " has closed the connection\n"); IUCV_DBF_TEXT(data, 2, "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); @@ -856,13 +860,15 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) static void conn_action_start(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); int rc; IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", - conn->netdev->name, conn->userid); + 
netdev->name, conn->userid); /* * We must set the state before calling iucv_connect because the @@ -876,41 +882,45 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) NULL, iucvMagic, conn); switch (rc) { case 0: - conn->netdev->tx_queue_len = conn->path->msglim; + netdev->tx_queue_len = conn->path->msglim; fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, CONN_EVENT_TIMER, conn); return; case 11: - PRINT_INFO("%s: User %s is currently not available.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); + dev_warn(privptr->dev, + "The IUCV device failed to connect to z/VM guest %s\n", + netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 12: - PRINT_INFO("%s: User %s is currently not ready.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); + dev_warn(privptr->dev, + "The IUCV device failed to connect to the peer on z/VM" + " guest %s\n", netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 13: - PRINT_WARN("%s: Too many IUCV connections.\n", - conn->netdev->name); + dev_err(privptr->dev, + "Connecting the IUCV device would exceed the maximum" + " number of IUCV connections\n"); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 14: - PRINT_WARN("%s: User %s has too many IUCV connections.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); + dev_err(privptr->dev, + "z/VM guest %s has too many IUCV connections" + " to connect with the IUCV device\n", + netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 15: - PRINT_WARN("%s: No IUCV authorization in CP directory.\n", - conn->netdev->name); + dev_err(privptr->dev, + "The IUCV device cannot connect to a z/VM guest with no" + " IUCV authorization\n"); fsm_newstate(fi, CONN_STATE_CONNERR); break; default: - PRINT_WARN("%s: iucv_connect returned error %d\n", - conn->netdev->name, rc); + dev_err(privptr->dev, + "Connecting the IUCV device failed with error %d\n", + rc); fsm_newstate(fi, CONN_STATE_CONNERR); break; } @@ -1059,8 +1069,9 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) switch (fsm_getstate(fi)) { case DEV_STATE_STARTWAIT: fsm_newstate(fi, DEV_STATE_RUNNING); - PRINT_INFO("%s: connected with remote side %s\n", - dev->name, privptr->conn->userid); + dev_info(privptr->dev, + "The IUCV device has been connected" + " successfully to %s\n", privptr->conn->userid); IUCV_DBF_TEXT(setup, 3, "connection is up and running\n"); break; @@ -1982,6 +1993,8 @@ static ssize_t conn_write(struct device_driver *drv, if (rc) goto out_unreg; + dev_info(priv->dev, "The IUCV interface to %s has been" + " established successfully\n", netiucv_printname(username)); return count; @@ -2027,10 +2040,9 @@ static ssize_t remove_write (struct device_driver *drv, continue; read_unlock_bh(&iucv_connection_rwlock); if (ndev->flags & (IFF_UP | IFF_RUNNING)) { - PRINT_WARN("netiucv: net device %s active with peer " - "%s\n", ndev->name, priv->conn->userid); - PRINT_WARN("netiucv: %s cannot be removed\n", - ndev->name); + dev_warn(dev, "The IUCV device is connected" + " to %s and cannot be removed\n", + priv->conn->userid); IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); return -EPERM; } @@ -2062,7 +2074,7 @@ static struct attribute_group *netiucv_drv_attr_groups[] = { static void netiucv_banner(void) { - PRINT_INFO("NETIUCV driver initialized\n"); + pr_info("driver initialized\n"); } static void __exit netiucv_exit(void) @@ -2088,7 +2100,7 @@ static void __exit netiucv_exit(void) 
iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); - PRINT_INFO("NETIUCV driver unloaded\n"); + pr_info("driver unloaded\n"); return; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index af6d6045851..d5ccce1643e 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -31,11 +31,10 @@ #include <asm/qdio.h> #include <asm/ccwdev.h> #include <asm/ccwgroup.h> +#include <asm/sysinfo.h> #include "qeth_core_mpc.h" -#define KMSG_COMPONENT "qeth" - /** * Debug Facility stuff */ @@ -74,11 +73,6 @@ struct qeth_dbf_info { #define QETH_DBF_TEXT_(name, level, text...) \ qeth_dbf_longtext(QETH_DBF_##name, level, text) -/** - * some more debug stuff - */ -#define PRINTK_HEADER "qeth: " - #define SENSE_COMMAND_REJECT_BYTE 0 #define SENSE_COMMAND_REJECT_FLAG 0x80 #define SENSE_RESETTING_EVENT_BYTE 1 @@ -733,6 +727,7 @@ struct qeth_card { struct qeth_osn_info osn_info; struct qeth_discipline discipline; atomic_t force_alloc_skb; + struct service_level qeth_service_level; }; struct qeth_card_list_struct { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 52d26592c72..e783644a210 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -8,6 +8,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> @@ -319,7 +322,10 @@ static int qeth_issue_next_read(struct qeth_card *card) return -EIO; iob = qeth_get_buffer(&card->read); if (!iob) { - PRINT_WARN("issue_next_read failed: no iob available!\n"); + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " + "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); @@ -327,7 +333,8 @@ static int qeth_issue_next_read(struct qeth_card *card) rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { - PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc); + QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " + "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -393,10 +400,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, } else { switch (cmd->hdr.command) { case IPA_CMD_STOPLAN: - PRINT_WARN("Link failure on %s (CHPID 0x%X) - " - "there is a network problem or " - "someone pulled the cable or " - "disabled the port.\n", + dev_warn(&card->gdev->dev, + "The link for interface %s on CHPID" + " 0x%X failed\n", QETH_CARD_IFNAME(card), card->info.chpid); card->lan_online = 0; @@ -404,9 +410,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: - PRINT_INFO("Link reestablished on %s " - "(CHPID 0x%X). 
Scheduling " - "IP address reset.\n", + dev_info(&card->gdev->dev, + "The link for %s on CHPID 0x%X has" + " been restored\n", QETH_CARD_IFNAME(card), card->info.chpid); netif_carrier_on(card->dev); @@ -458,7 +464,7 @@ static int qeth_check_idx_response(unsigned char *buffer) QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); if ((buffer[2] & 0xc0) == 0xc0) { - PRINT_WARN("received an IDX TERMINATE " + QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " "with cause code 0x%02x%s\n", buffer[4], ((buffer[4] == 0x22) ? @@ -744,8 +750,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); - PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", - dev_name(&cdev->dev), dstat, cstat); + dev_warn(&cdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ", + dev_name(&cdev->dev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); return 1; @@ -784,12 +792,14 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, switch (PTR_ERR(irb)) { case -EIO: - PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); + QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", + dev_name(&cdev->dev)); QETH_DBF_TEXT(TRACE, 2, "ckirberr"); QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); break; case -ETIMEDOUT: - PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); + dev_warn(&cdev->dev, "A hardware operation timed out" + " on the device\n"); QETH_DBF_TEXT(TRACE, 2, "ckirberr"); QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { @@ -802,8 +812,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, } break; default: - PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), - dev_name(&cdev->dev)); + QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", + dev_name(&cdev->dev), PTR_ERR(irb)); QETH_DBF_TEXT(TRACE, 2, "ckirberr"); QETH_DBF_TEXT(TRACE, 2, " rc???"); } @@ -869,10 +879,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { if (irb->esw.esw0.erw.cons) { - /* TODO: we should make this s390dbf */ - PRINT_WARN("sense data available on channel %s.\n", - CHANNEL_ID(channel)); - PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); + dev_warn(&channel->ccwdev->dev, + "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s sense data available. 
cstat " + "0x%X dstat 0x%X\n", + dev_name(&channel->ccwdev->dev), cstat, dstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); print_hex_dump(KERN_WARNING, "qeth: sense data ", @@ -1138,6 +1150,14 @@ static int qeth_setup_card(struct qeth_card *card) return 0; } +static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) +{ + struct qeth_card *card = container_of(slr, struct qeth_card, + qeth_service_level); + seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), + card->info.mcl_level); +} + static struct qeth_card *qeth_alloc_card(void) { struct qeth_card *card; @@ -1157,6 +1177,8 @@ static struct qeth_card *qeth_alloc_card(void) return NULL; } card->options.layer2 = -1; + card->qeth_service_level.seq_print = qeth_core_sl_print; + register_service_level(&card->qeth_service_level); return card; } @@ -1175,8 +1197,8 @@ static int qeth_determine_card_type(struct qeth_card *card) card->qdio.no_out_queues = known_devices[i][8]; card->info.is_multicast_different = known_devices[i][9]; if (qeth_is_1920_device(card)) { - PRINT_INFO("Priority Queueing not able " - "due to hardware limitations!\n"); + dev_info(&card->gdev->dev, + "Priority Queueing not supported\n"); card->qdio.no_out_queues = 1; card->qdio.default_out_queue = 0; } @@ -1185,7 +1207,8 @@ static int qeth_determine_card_type(struct qeth_card *card) i++; } card->info.type = QETH_CARD_TYPE_UNKNOWN; - PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); + dev_err(&card->gdev->dev, "The adapter hardware is of an " + "unknown type\n"); return -ENOENT; } @@ -1368,8 +1391,8 @@ static int qeth_get_unitaddr(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "getunit"); rc = qeth_read_conf_data(card, (void **) &prcd, &length); if (rc) { - PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", - CARD_DDEV_ID(card), rc); + QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", + dev_name(&card->gdev->dev), rc); return rc; } card->info.chpid = prcd[30]; @@ -1519,7 +1542,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, if (rc == -ERESTARTSYS) return rc; if (channel->state != CH_STATE_ACTIVATING) { - PRINT_WARN("IDX activate timed out!\n"); + dev_warn(&channel->ccwdev->dev, "The qeth device driver" + " failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", + dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); qeth_clear_cmd_buffers(channel); return -ETIME; @@ -1552,20 +1578,21 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) - PRINT_ERR("IDX_ACTIVATE on write channel device %s: " - "adapter exclusively used by another host\n", - CARD_WDEV_ID(card)); + dev_err(&card->write.ccwdev->dev, + "The adapter is used exclusively by another " + "host\n"); else - PRINT_ERR("IDX_ACTIVATE on write channel device %s: " - "negative reply\n", CARD_WDEV_ID(card)); + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" + " negative reply\n", + dev_name(&card->write.ccwdev->dev)); goto out; } memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { - PRINT_WARN("IDX_ACTIVATE on write channel device %s: " - "function level mismatch " - "(sent: 0x%x, received: 0x%x)\n", - CARD_WDEV_ID(card), card->info.func_level, temp); + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " + "function level mismatch (sent: 
0x%x, received: " + "0x%x)\n", dev_name(&card->write.ccwdev->dev), + card->info.func_level, temp); goto out; } channel->state = CH_STATE_UP; @@ -1591,12 +1618,13 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) - PRINT_ERR("IDX_ACTIVATE on read channel device %s: " - "adapter exclusively used by another host\n", - CARD_RDEV_ID(card)); + dev_err(&card->write.ccwdev->dev, + "The adapter is used exclusively by another " + "host\n"); else - PRINT_ERR("IDX_ACTIVATE on read channel device %s: " - "negative reply\n", CARD_RDEV_ID(card)); + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" + " negative reply\n", + dev_name(&card->read.ccwdev->dev)); goto out; } @@ -1610,9 +1638,10 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if (temp != qeth_peer_func_level(card->info.func_level)) { - PRINT_WARN("IDX_ACTIVATE on read channel device %s: function " - "level mismatch (sent: 0x%x, received: 0x%x)\n", - CARD_RDEV_ID(card), card->info.func_level, temp); + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " + "level mismatch (sent: 0x%x, received: 0x%x)\n", + dev_name(&card->read.ccwdev->dev), + card->info.func_level, temp); goto out; } memcpy(&card->token.issuer_rm_r, @@ -1686,8 +1715,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, (addr_t) iob, 0, 0); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { - PRINT_WARN("qeth_send_control_data: " - "ccw_device_start rc = %i\n", rc); + QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " + "ccw_device_start rc = %i\n", + dev_name(&card->write.ccwdev->dev), rc); QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); list_del_init(&reply->list); @@ -2170,11 +2200,8 @@ static void qeth_print_status_with_portname(struct qeth_card *card) dbf_text[i] = (char) _ebcasc[(__u8) dbf_text[i]]; dbf_text[8] = 0; - PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n" + dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" "with link type %s (portname: %s)\n", - CARD_RDEV_ID(card), - CARD_WDEV_ID(card), - CARD_DDEV_ID(card), qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", @@ -2187,23 +2214,17 @@ static void qeth_print_status_with_portname(struct qeth_card *card) static void qeth_print_status_no_portname(struct qeth_card *card) { if (card->info.portname[0]) - PRINT_INFO("Device %s/%s/%s is a%s " + dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s " "(no portname needed by interface).\n", - CARD_RDEV_ID(card), - CARD_WDEV_ID(card), - CARD_DDEV_ID(card), qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card)); else - PRINT_INFO("Device %s/%s/%s is a%s " + dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s.\n", - CARD_RDEV_ID(card), - CARD_WDEV_ID(card), - CARD_DDEV_ID(card), qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? 
card->info.mcl_level : "", @@ -2325,7 +2346,6 @@ static int qeth_init_input_buffer(struct qeth_card *card, * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off * buffers */ - BUG_ON(!pool_entry); buf->pool_entry = pool_entry; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { @@ -2630,9 +2650,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) qeth_get_micros() - card->perf_stats.inbound_do_qdio_start_time; if (rc) { - PRINT_WARN("qeth_queue_input_buffer's do_QDIO " - "return %i (device %s).\n", - rc, CARD_DDEV_ID(card)); + dev_warn(&card->gdev->dev, + "QDIO reported an error, rc=%i\n", rc); QETH_DBF_TEXT(TRACE, 2, "qinberr"); QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); } @@ -3730,6 +3749,7 @@ static void qeth_core_free_card(struct qeth_card *card) free_netdev(card->dev); kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); + unregister_service_level(&card->qeth_service_level); kfree(card); } @@ -3757,7 +3777,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, int qeth_core_hardsetup_card(struct qeth_card *card) { - struct qdio_ssqd_desc *qdio_ssqd; + struct qdio_ssqd_desc *ssqd; int retries = 3; int mpno = 0; int rc; @@ -3766,7 +3786,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card) atomic_set(&card->force_alloc_skb, 0); retry: if (retries < 3) { - PRINT_WARN("Retrying to do IDX activates.\n"); + QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", + dev_name(&card->gdev->dev)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); @@ -3792,9 +3813,16 @@ retry: return rc; } - qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); - if (qdio_ssqd) - mpno = qdio_ssqd->pcnt; + ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL); + if (!ssqd) { + rc = -ENOMEM; + goto out; + } + rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd); + if (rc == 0) + mpno = ssqd->pcnt; + kfree(ssqd); + if (mpno) mpno = min(mpno - 1, QETH_MAX_PORTNO); if (card->info.portno > mpno) { @@ -3834,7 +3862,10 @@ retry: } return 0; out: - PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc); + dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! 
rc=%d\n", + dev_name(&card->gdev->dev), rc); return rc; } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); @@ -4054,8 +4085,8 @@ int qeth_core_load_discipline(struct qeth_card *card, break; } if (!card->discipline.ccwgdriver) { - PRINT_ERR("Support for discipline %d not present\n", - discipline); + dev_err(&card->gdev->dev, "There is no kernel module to " + "support discipline %d\n", discipline); rc = -EINVAL; } return rc; @@ -4448,7 +4479,7 @@ static int __init qeth_core_init(void) { int rc; - PRINT_INFO("loading core functions\n"); + pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); rwlock_init(&qeth_core_card_list.rwlock); @@ -4488,9 +4519,10 @@ driver_err: ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: + QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); qeth_unregister_dbf_views(); out_err: - PRINT_ERR("Initialization failed with code %d\n", rc); + pr_err("Initializing the qeth device driver failed\n"); return rc; } @@ -4503,7 +4535,7 @@ static void __exit qeth_core_exit(void) ccw_driver_unregister(&qeth_ccw_driver); kmem_cache_destroy(qeth_core_header_cache); qeth_unregister_dbf_views(); - PRINT_INFO("core functions removed\n"); + pr_info("core functions removed\n"); } module_init(qeth_core_init); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8a8fad7a8be..2c48591ced4 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -8,6 +8,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> @@ -497,12 +500,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, OSA_ADDR_LEN); - PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " - "successfully registered on device %s\n", - card->dev->dev_addr[0], card->dev->dev_addr[1], - card->dev->dev_addr[2], card->dev->dev_addr[3], - card->dev->dev_addr[4], card->dev->dev_addr[5], - card->dev->name); + dev_info(&card->gdev->dev, + "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " + "successfully registered on device %s\n", + card->dev->dev_addr[0], card->dev->dev_addr[1], + card->dev->dev_addr[2], card->dev->dev_addr[3], + card->dev->dev_addr[4], card->dev->dev_addr[5], + card->dev->name); } return 0; } @@ -1009,9 +1013,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); if (rc == 0xe080) { - PRINT_WARN("LAN on card %s if offline! 
" - "Waiting for STARTLAN from card.\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); card->lan_online = 0; } return rc; @@ -1111,8 +1114,8 @@ static int qeth_l2_recover(void *ptr) if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; QETH_DBF_TEXT(TRACE, 2, "recover2"); - PRINT_WARN("Recovery of device %s started ...\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); card->use_hard_stop = 1; __qeth_l2_set_offline(card->gdev, 1); rc = __qeth_l2_set_online(card->gdev, 1); @@ -1120,27 +1123,27 @@ static int qeth_l2_recover(void *ptr) qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); if (!rc) - PRINT_INFO("Device %s successfully recovered!\n", - CARD_BUS_ID(card)); + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); else { rtnl_lock(); dev_close(card->dev); rtnl_unlock(); - PRINT_INFO("Device %s could not be recovered!\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); } return 0; } static int __init qeth_l2_init(void) { - PRINT_INFO("register layer 2 discipline\n"); + pr_info("register layer 2 discipline\n"); return 0; } static void __exit qeth_l2_exit(void) { - PRINT_INFO("unregister layer 2 discipline\n"); + pr_info("unregister layer 2 discipline\n"); } static void qeth_l2_shutdown(struct ccwgroup_device *gdev) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ed59fedd592..c0b30b25a5f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -8,6 +8,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> @@ -917,8 +920,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, if (rc) { QETH_DBF_TEXT(TRACE, 2, "FAILED"); qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); - PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", - buf, rc, rc); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); } return rc; } @@ -1029,24 +1032,22 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "setadprm"); if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - PRINT_WARN("set adapter parameters not supported " - "on device %s.\n", - CARD_BUS_ID(card)); + dev_info(&card->gdev->dev, + "set adapter parameters not supported.\n"); QETH_DBF_TEXT(SETUP, 2, " notsupp"); return 0; } rc = qeth_query_setadapterparms(card); if (rc) { - PRINT_WARN("couldn't set adapter parameters on device %s: " - "x%x\n", CARD_BUS_ID(card), rc); + QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " + "0x%x\n", card->gdev->dev.bus_id, rc); return rc; } if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { rc = qeth_setadpparms_change_macaddr(card); if (rc) - PRINT_WARN("couldn't get MAC address on " - "device %s: x%x\n", - CARD_BUS_ID(card), rc); + dev_warn(&card->gdev->dev, "Reading the adapter MAC" + " address failed\n", rc); } if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || @@ -1160,16 +1161,17 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "ipaarp"); if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { - PRINT_WARN("ARP processing not supported " - "on %s!\n", QETH_CARD_IFNAME(card)); + 
dev_info(&card->gdev->dev, + "ARP processing not supported on %s!\n", + QETH_CARD_IFNAME(card)); return 0; } rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Could not start ARP processing " - "assist on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Starting ARP processing support for %s failed\n", + QETH_CARD_IFNAME(card)); } return rc; } @@ -1181,19 +1183,21 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { - PRINT_INFO("Hardware IP fragmentation not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Hardware IP fragmentation not supported on %s\n", + QETH_CARD_IFNAME(card)); return -EOPNOTSUPP; } rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Could not start Hardware IP fragmentation " - "assist on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Starting IP fragmentation support for %s failed\n", + QETH_CARD_IFNAME(card)); } else - PRINT_INFO("Hardware IP fragmentation enabled \n"); + dev_info(&card->gdev->dev, + "Hardware IP fragmentation enabled \n"); return rc; } @@ -1207,17 +1211,18 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { - PRINT_INFO("Inbound source address not " - "supported on %s\n", QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Inbound source address not supported on %s\n", + QETH_CARD_IFNAME(card)); return -EOPNOTSUPP; } rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, IPA_CMD_ASS_START, 0); if (rc) - PRINT_WARN("Could not start inbound source " - "assist on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Starting proxy ARP support for %s failed\n", + QETH_CARD_IFNAME(card)); return rc; } @@ -1228,19 +1233,19 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "strtvlan"); if (!qeth_is_supported(card, IPA_FULL_VLAN)) { - PRINT_WARN("VLAN not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); return -EOPNOTSUPP; } rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Could not start vlan " - "assist on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Starting VLAN support for %s failed\n", + QETH_CARD_IFNAME(card)); } else { - PRINT_INFO("VLAN enabled \n"); + dev_info(&card->gdev->dev, "VLAN enabled\n"); } return rc; } @@ -1252,19 +1257,20 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "stmcast"); if (!qeth_is_supported(card, IPA_MULTICASTING)) { - PRINT_WARN("Multicast not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Multicast not supported on %s\n", + QETH_CARD_IFNAME(card)); return -EOPNOTSUPP; } rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Could not start multicast " - "assist on %s: rc=%i\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Starting multicast support for %s failed\n", + QETH_CARD_IFNAME(card)); } else { - PRINT_INFO("Multicast enabled\n"); + dev_info(&card->gdev->dev, "Multicast enabled\n"); card->dev->flags |= IFF_MULTICAST; } return rc; @@ -1315,36 
+1321,37 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); if (rc) { - PRINT_ERR("IPv6 query ipassist failed on %s\n", - QETH_CARD_IFNAME(card)); + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + QETH_CARD_IFNAME(card)); return rc; } rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START, 3); if (rc) { - PRINT_WARN("IPv6 start assist (version 4) failed " - "on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + QETH_CARD_IFNAME(card)); return rc; } rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, IPA_CMD_ASS_START); if (rc) { - PRINT_WARN("IPV6 start assist (version 6) failed " - "on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + QETH_CARD_IFNAME(card)); return rc; } rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, IPA_CMD_ASS_START); if (rc) { - PRINT_WARN("Could not enable passthrough " - "on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Enabling the passthrough mode for %s failed\n", + QETH_CARD_IFNAME(card)); return rc; } out: - PRINT_INFO("IPV6 enabled \n"); + dev_info(&card->gdev->dev, "IPV6 enabled\n"); return 0; } #endif @@ -1356,8 +1363,8 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "strtipv6"); if (!qeth_is_supported(card, IPA_IPV6)) { - PRINT_WARN("IPv6 not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); return 0; } #ifdef CONFIG_QETH_IPV6 @@ -1373,34 +1380,35 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); card->info.broadcast_capable = 0; if (!qeth_is_supported(card, IPA_FILTERING)) { - PRINT_WARN("Broadcast not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Broadcast not supported on %s\n", + QETH_CARD_IFNAME(card)); rc = -EOPNOTSUPP; goto out; } rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Could not enable broadcasting filtering " - "on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " + "%s failed\n", QETH_CARD_IFNAME(card)); goto out; } rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, IPA_CMD_ASS_CONFIGURE, 1); if (rc) { - PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, + "Setting up broadcast filtering for %s failed\n", + QETH_CARD_IFNAME(card)); goto out; } card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; - PRINT_INFO("Broadcast enabled \n"); + dev_info(&card->gdev->dev, "Broadcast enabled\n"); rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, IPA_CMD_ASS_ENABLE, 1); if (rc) { - PRINT_WARN("Could not set up broadcast echo filtering on " - "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, "Setting up broadcast echo " + "filtering for %s failed\n", QETH_CARD_IFNAME(card)); goto out; } card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; @@ -1419,18 +1427,18 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_START, 0); if (rc) { - PRINT_WARN("Starting Inbound HW Checksumming failed on %s: " - "0x%x,\ncontinuing using Inbound SW 
Checksumming\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " + "failed, using SW checksumming\n", + QETH_CARD_IFNAME(card)); return rc; } rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_ENABLE, card->info.csum_mask); if (rc) { - PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: " - "0x%x,\ncontinuing using Inbound SW Checksumming\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " + "failed, using SW checksumming\n", + QETH_CARD_IFNAME(card)); return rc; } return 0; @@ -1443,26 +1451,30 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "strtcsum"); if (card->options.checksum_type == NO_CHECKSUMMING) { - PRINT_WARN("Using no checksumming on %s.\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Using no checksumming on %s.\n", + QETH_CARD_IFNAME(card)); return 0; } if (card->options.checksum_type == SW_CHECKSUMMING) { - PRINT_WARN("Using SW checksumming on %s.\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Using SW checksumming on %s.\n", + QETH_CARD_IFNAME(card)); return 0; } if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { - PRINT_WARN("Inbound HW Checksumming not " - "supported on %s,\ncontinuing " - "using Inbound SW Checksumming\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Inbound HW Checksumming not " + "supported on %s,\ncontinuing " + "using Inbound SW Checksumming\n", + QETH_CARD_IFNAME(card)); card->options.checksum_type = SW_CHECKSUMMING; return 0; } rc = qeth_l3_send_checksum_command(card); if (!rc) - PRINT_INFO("HW Checksumming (inbound) enabled \n"); + dev_info(&card->gdev->dev, + "HW Checksumming (inbound) enabled\n"); return rc; } @@ -1474,18 +1486,20 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) QETH_DBF_TEXT(TRACE, 3, "sttso"); if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { - PRINT_WARN("Outbound TSO not supported on %s\n", - QETH_CARD_IFNAME(card)); + dev_info(&card->gdev->dev, + "Outbound TSO not supported on %s\n", + QETH_CARD_IFNAME(card)); rc = -EOPNOTSUPP; } else { rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, IPA_CMD_ASS_START, 0); if (rc) - PRINT_WARN("Could not start outbound TSO " - "assist on %s: rc=%i\n", - QETH_CARD_IFNAME(card), rc); + dev_warn(&card->gdev->dev, "Starting outbound TCP " + "segmentation offload for %s failed\n", + QETH_CARD_IFNAME(card)); else - PRINT_INFO("Outbound TSO enabled\n"); + dev_info(&card->gdev->dev, + "Outbound TSO enabled\n"); } if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { card->options.large_send = QETH_LARGE_SEND_NO; @@ -1578,12 +1592,8 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card, else { card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | UNIQUE_ID_NOT_BY_CARD; - PRINT_WARN("couldn't get a unique id from the card on device " - "%s (result=x%x), using default id. ipv6 " - "autoconfig on other lpars may lead to duplicate " - "ip addresses. please use manually " - "configured ones.\n", - CARD_BUS_ID(card), cmd->hdr.return_code); + dev_warn(&card->gdev->dev, "The network adapter failed to " + "generate a unique ID\n"); } return 0; } @@ -3086,9 +3096,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); if (rc == 0xe080) { - PRINT_WARN("LAN on card %s if offline! 
" - "Waiting for STARTLAN from card.\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); card->lan_online = 0; } return rc; @@ -3194,8 +3203,8 @@ static int qeth_l3_recover(void *ptr) if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; QETH_DBF_TEXT(TRACE, 2, "recover2"); - PRINT_WARN("Recovery of device %s started ...\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); card->use_hard_stop = 1; __qeth_l3_set_offline(card->gdev, 1); rc = __qeth_l3_set_online(card->gdev, 1); @@ -3203,14 +3212,14 @@ static int qeth_l3_recover(void *ptr) qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); if (!rc) - PRINT_INFO("Device %s successfully recovered!\n", - CARD_BUS_ID(card)); + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); else { rtnl_lock(); dev_close(card->dev); rtnl_unlock(); - PRINT_INFO("Device %s could not be recovered!\n", - CARD_BUS_ID(card)); + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); } return 0; } @@ -3344,7 +3353,7 @@ static int qeth_l3_register_notifiers(void) return rc; } #else - PRINT_WARN("layer 3 discipline no IPv6 support\n"); + pr_warning("There is no IPv6 support for the layer 3 discipline\n"); #endif return 0; } @@ -3363,7 +3372,7 @@ static int __init qeth_l3_init(void) { int rc = 0; - PRINT_INFO("register layer 3 discipline\n"); + pr_info("register layer 3 discipline\n"); rc = qeth_l3_register_notifiers(); return rc; } @@ -3371,7 +3380,7 @@ static int __init qeth_l3_init(void) static void __exit qeth_l3_exit(void) { qeth_l3_unregister_notifiers(); - PRINT_INFO("unregister layer 3 discipline\n"); + pr_info("unregister layer 3 discipline\n"); } module_init(qeth_l3_init); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3d4e3e3f3fc..e529b55b3ce 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -25,9 +25,15 @@ * Sven Schuetz */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/miscdevice.h> +#include <linux/seq_file.h> #include "zfcp_ext.h" +#define ZFCP_BUS_ID_SIZE 20 + static char *device; MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); @@ -83,9 +89,9 @@ static int __init zfcp_device_setup(char *devstr) strcpy(str, devstr); token = strsep(&str, ","); - if (!token || strlen(token) >= BUS_ID_SIZE) + if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) goto err_out; - strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); + strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE); token = strsep(&str, ","); if (!token || strict_strtoull(token, 0, @@ -102,7 +108,7 @@ static int __init zfcp_device_setup(char *devstr) err_out: kfree(str); - pr_err("zfcp: %s is not a valid SCSI device\n", devstr); + pr_err("%s is not a valid SCSI device\n", devstr); return 0; } @@ -186,13 +192,13 @@ static int __init zfcp_module_init(void) retval = misc_register(&zfcp_cfdc_misc); if (retval) { - pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n"); + pr_err("Registering the misc device zfcp_cfdc failed\n"); goto out_misc; } retval = zfcp_ccw_register(); if (retval) { - pr_err("zfcp: The zfcp device driver could not register with " + pr_err("The zfcp device driver could not register with " "the common I/O layer\n"); goto out_ccw_register; } @@ -436,6 +442,16 @@ static void _zfcp_status_read_scheduler(struct work_struct *work) stat_work)); 
} +static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) +{ + struct zfcp_adapter *adapter = + container_of(sl, struct zfcp_adapter, service_level); + + seq_printf(m, "zfcp: %s microcode level %x\n", + dev_name(&adapter->ccw_device->dev), + adapter->fsf_lic_version); +} + /** * zfcp_adapter_enqueue - enqueue a new adapter to the list * @ccw_device: pointer to the struct cc_device @@ -500,6 +516,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); + adapter->service_level.seq_print = zfcp_print_sl; + /* mark adapter unusable as long as sysfs registration is not complete */ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 951a8d409d1..728147131e1 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" /** diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index ec2abceca6d..f1a7518e67e 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -7,6 +7,9 @@ * Copyright IBM Corporation 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/types.h> #include <linux/miscdevice.h> #include <asm/ccwdev.h> diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 31012d58cfb..735d675623f 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/ctype.h> #include <asm/debug.h> #include "zfcp_ext.h" diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ce4c75bd19..e19e46ae4a6 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -33,6 +33,7 @@ #include <asm/qdio.h> #include <asm/debug.h> #include <asm/ebcdic.h> +#include <asm/sysinfo.h> #include "zfcp_dbf.h" #include "zfcp_fsf.h" @@ -515,6 +516,7 @@ struct zfcp_adapter { struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; + struct service_level service_level; atomic_t qdio_outb_full; /* queue full incidents */ }; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c557ba34e1a..4ed4950d994 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" #define ZFCP_MAX_ERPS 3 @@ -1281,10 +1284,13 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (result != ZFCP_ERP_SUCCEEDED) + if (result != ZFCP_ERP_SUCCEEDED) { + unregister_service_level(&adapter->service_level); zfcp_erp_rports_del(adapter); - else + } else { + register_service_level(&adapter->service_level); schedule_work(&adapter->scan_work); + } zfcp_adapter_put(adapter); break; } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8aab3091a7b..f009f2a7ec3 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2008 */ +#define KMSG_COMPONENT "zfcp" 
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" struct ct_iu_gpn_ft_req { diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index dc036769040..9c72e083559 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/blktrace_api.h> #include "zfcp_ext.h" diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 664752f90b2..d3b55fb66f1 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" /* FIXME(tune): free space should be one max. SBAL chain plus what? */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 468c880f8b6..9dc42a68fbd 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2002, 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" #include <asm/atomic.h> diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index ca9293ba176..899af2b45b1 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -6,6 +6,9 @@ * Copyright IBM Corporation 2008 */ +#define KMSG_COMPONENT "zfcp" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include "zfcp_ext.h" #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index c3e4ab07b9c..0eea9078138 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -1,17 +1,21 @@ /* * drivers/s390/sysinfo.c * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) + * Copyright IBM Corp. 2001, 2008 + * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/init.h> #include <linux/delay.h> +#include <linux/module.h> #include <asm/ebcdic.h> #include <asm/sysinfo.h> +#include <asm/cpcmd.h> /* Sigh, math-emu. Don't ask. */ #include <asm/sfp-util.h> @@ -271,6 +275,125 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +/* + * Service levels interface. 
+ */ + +static DECLARE_RWSEM(service_level_sem); +static LIST_HEAD(service_level_list); + +int register_service_level(struct service_level *slr) +{ + struct service_level *ptr; + + down_write(&service_level_sem); + list_for_each_entry(ptr, &service_level_list, list) + if (ptr == slr) { + up_write(&service_level_sem); + return -EEXIST; + } + list_add_tail(&slr->list, &service_level_list); + up_write(&service_level_sem); + return 0; +} +EXPORT_SYMBOL(register_service_level); + +int unregister_service_level(struct service_level *slr) +{ + struct service_level *ptr, *next; + int rc = -ENOENT; + + down_write(&service_level_sem); + list_for_each_entry_safe(ptr, next, &service_level_list, list) { + if (ptr != slr) + continue; + list_del(&ptr->list); + rc = 0; + break; + } + up_write(&service_level_sem); + return rc; +} +EXPORT_SYMBOL(unregister_service_level); + +static void *service_level_start(struct seq_file *m, loff_t *pos) +{ + down_read(&service_level_sem); + return seq_list_start(&service_level_list, *pos); +} + +static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &service_level_list, pos); +} + +static void service_level_stop(struct seq_file *m, void *p) +{ + up_read(&service_level_sem); +} + +static int service_level_show(struct seq_file *m, void *p) +{ + struct service_level *slr; + + slr = list_entry(p, struct service_level, list); + slr->seq_print(m, slr); + return 0; +} + +static const struct seq_operations service_level_seq_ops = { + .start = service_level_start, + .next = service_level_next, + .stop = service_level_stop, + .show = service_level_show +}; + +static int service_level_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &service_level_seq_ops); +} + +static const struct file_operations service_level_ops = { + .open = service_level_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static void service_level_vm_print(struct seq_file *m, + struct service_level *slr) +{ + char *query_buffer, *str; + + query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); + if (!query_buffer) + return; + cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); + str = strchr(query_buffer, '\n'); + if (str) + *str = 0; + seq_printf(m, "VM: %s\n", query_buffer); + kfree(query_buffer); +} + +static struct service_level service_level_vm = { + .seq_print = service_level_vm_print +}; + +static __init int create_proc_service_level(void) +{ + proc_create("service_levels", 0, NULL, &service_level_ops); + if (MACHINE_IS_VM) + register_service_level(&service_level_vm); + return 0; +} + +subsys_initcall(create_proc_service_level); + +/* + * Bogomips calculation based on cpu capability. 
+ */ + int get_cpu_capability(unsigned int *capability) { struct sysinfo_1_2_2 *info; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 162cd927d94..94acbeed4e7 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ - { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */ - { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */ + { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 2a5b29d1217..e2dd6a45924 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c @@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) INIT_WORK(&vport->crq_work, handle_crq); - err = crq_queue_create(&vport->crq_queue, target); + err = scsi_add_host(shost, target->dev); if (err) goto free_srp_target; - err = scsi_add_host(shost, target->dev); + err = scsi_tgt_alloc_queue(shost); if (err) - goto destroy_queue; + goto remove_host; - err = scsi_tgt_alloc_queue(shost); + err = crq_queue_create(&vport->crq_queue, target); if (err) - goto destroy_queue; + goto free_queue; return 0; -destroy_queue: - crq_queue_destroy(target); +free_queue: + scsi_tgt_free_queue(shost); +remove_host: + scsi_remove_host(shost); free_srp_target: srp_target_free(target); put_host: diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 801c7cf54d2..3fdee7370cc 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -489,12 +489,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (!__kfifo_get(session->cmdpool.queue, (void*)&task, sizeof(void*))) return NULL; - - if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) && - hdr->ttt == RESERVED_ITT) { - conn->ping_task = task; - conn->last_ping = jiffies; - } } /* * released in complete pdu for task we expect a response for, and @@ -703,6 +697,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); if (!task) iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); + else if (!rhdr) { + /* only track our nops */ + conn->ping_task = task; + conn->last_ping = 
jiffies; + } } static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fa45a1a6686..148d3af92ae 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -648,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) struct request *req = cmd->request; unsigned long flags; - scsi_unprep_request(req); spin_lock_irqsave(q->queue_lock, flags); + scsi_unprep_request(req); blk_requeue_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 165fc010978..557b54ab2f2 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -51,7 +51,6 @@ #ifdef CONFIG_SUPERH #include <asm/clock.h> #include <asm/sh_bios.h> -#include <asm/kgdb.h> #endif #include "sh-sci.h" @@ -65,10 +64,6 @@ struct sci_port { /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ unsigned int irqs[SCIx_NR_IRQS]; - /* Port pin configuration */ - void (*init_pins)(struct uart_port *port, - unsigned int cflag); - /* Port enable callback */ void (*enable)(struct uart_port *port); @@ -85,10 +80,6 @@ struct sci_port { #endif }; -#ifdef CONFIG_SH_KGDB -static struct sci_port *kgdb_sci_port; -#endif - #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE static struct sci_port *serial_console_port; #endif @@ -101,21 +92,26 @@ static void sci_stop_tx(struct uart_port *port); static struct sci_port sci_ports[SCI_NPORTS]; static struct uart_driver sci_uart_driver; -#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && \ - defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) +static inline struct sci_port * +to_sci_port(struct uart_port *uart) +{ + return container_of(uart, struct sci_port, port); +} + +#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) + +#ifdef CONFIG_CONSOLE_POLL static inline void handle_error(struct uart_port *port) { /* Clear error flags */ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); } -static int get_char(struct uart_port *port) +static int sci_poll_get_char(struct uart_port *port) { - unsigned long flags; unsigned short status; int c; - spin_lock_irqsave(&port->lock, flags); do { status = sci_in(port, SCxSR); if (status & SCxSR_ERRORS(port)) { @@ -123,23 +119,21 @@ static int get_char(struct uart_port *port) continue; } } while (!(status & SCxSR_RDxF(port))); + c = sci_in(port, SCxRDR); - sci_in(port, SCxSR); /* Dummy read */ + + /* Dummy read */ + sci_in(port, SCxSR); sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); - spin_unlock_irqrestore(&port->lock, flags); return c; } -#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ +#endif -#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || defined(CONFIG_SH_KGDB) -static void put_char(struct uart_port *port, char c) +static void sci_poll_put_char(struct uart_port *port, unsigned char c) { - unsigned long flags; unsigned short status; - spin_lock_irqsave(&port->lock, flags); - do { status = sci_in(port, SCxSR); } while (!(status & SCxSR_TDxE(port))); @@ -147,96 +141,22 @@ static void put_char(struct uart_port *port, char c) sci_in(port, SCxSR); /* Dummy read */ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); sci_out(port, SCxTDR, c); - - spin_unlock_irqrestore(&port->lock, flags); } -#endif - -#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE -static void put_string(struct sci_port *sci_port, const char *buffer, int count) -{ - struct uart_port *port = &sci_port->port; - const unsigned char *p = buffer; - int i; - -#if defined(CONFIG_SH_STANDARD_BIOS) || 
defined(CONFIG_SH_KGDB) - int checksum; - int usegdb=0; - -#ifdef CONFIG_SH_STANDARD_BIOS - /* This call only does a trap the first time it is - * called, and so is safe to do here unconditionally - */ - usegdb |= sh_bios_in_gdb_mode(); -#endif -#ifdef CONFIG_SH_KGDB - usegdb |= (kgdb_in_gdb_mode && (sci_port == kgdb_sci_port)); -#endif - - if (usegdb) { - /* $<packet info>#<checksum>. */ - do { - unsigned char c; - put_char(port, '$'); - put_char(port, 'O'); /* 'O'utput to console */ - checksum = 'O'; - - for (i=0; i<count; i++) { /* Don't use run length encoding */ - int h, l; - - c = *p++; - h = hex_asc_hi(c); - l = hex_asc_lo(c); - put_char(port, h); - put_char(port, l); - checksum += h + l; - } - put_char(port, '#'); - put_char(port, hex_asc_hi(checksum)); - put_char(port, hex_asc_lo(checksum)); - } while (get_char(port) != '+'); - } else -#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ - for (i=0; i<count; i++) { - if (*p == 10) - put_char(port, '\r'); - put_char(port, *p++); - } -} -#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ - -#ifdef CONFIG_SH_KGDB -static int kgdb_sci_getchar(void) -{ - int c; - - /* Keep trying to read a character, this could be neater */ - while ((c = get_char(&kgdb_sci_port->port)) < 0) - cpu_relax(); - - return c; -} - -static inline void kgdb_sci_putchar(int c) -{ - put_char(&kgdb_sci_port->port, c); -} -#endif /* CONFIG_SH_KGDB */ +#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ #if defined(__H8300S__) enum { sci_disable, sci_enable }; -static void h8300_sci_config(struct uart_port* port, unsigned int ctrl) +static void h8300_sci_config(struct uart_port *port, unsigned int ctrl) { - volatile unsigned char *mstpcrl=(volatile unsigned char *)MSTPCRL; + volatile unsigned char *mstpcrl = (volatile unsigned char *)MSTPCRL; int ch = (port->mapbase - SMR0) >> 3; unsigned char mask = 1 << (ch+1); - if (ctrl == sci_disable) { + if (ctrl == sci_disable) *mstpcrl |= mask; - } else { + else *mstpcrl &= ~mask; - } } static inline void h8300_sci_enable(struct uart_port *port) @@ -251,7 +171,7 @@ static inline void h8300_sci_disable(struct uart_port *port) #endif #if defined(__H8300H__) || defined(__H8300S__) -static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag) +static void sci_init_pins(struct uart_port *port, unsigned int cflag) { int ch = (port->mapbase - SMR0) >> 3; @@ -266,141 +186,99 @@ static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag) /* tx mark output*/ H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; } -#else -#define sci_init_pins_sci NULL -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) -static void sci_init_pins_irda(struct uart_port *port, unsigned int cflag) +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) { - unsigned int fcr_val = 0; - - if (cflag & CRTSCTS) - fcr_val |= SCFCR_MCE; - - sci_out(port, SCFCR, fcr_val); -} -#else -#define sci_init_pins_irda NULL -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag) -{ - unsigned int fcr_val = 0; - - set_sh771x_scif_pfc(port); - if (cflag & CRTSCTS) { - fcr_val |= SCFCR_MCE; - } - sci_out(port, SCFCR, fcr_val); + if (port->mapbase == 0xA4400000) { + __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); + __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); + } else if (port->mapbase == 0xA4410000) + 
__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); } #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) -static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) { - unsigned int fcr_val = 0; unsigned short data; if (cflag & CRTSCTS) { /* enable RTS/CTS */ if (port->mapbase == 0xa4430000) { /* SCIF0 */ /* Clear PTCR bit 9-2; enable all scif pins but sck */ - data = ctrl_inw(PORT_PTCR); - ctrl_outw((data & 0xfc03), PORT_PTCR); + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xfc03), PORT_PTCR); } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ /* Clear PVCR bit 9-2 */ - data = ctrl_inw(PORT_PVCR); - ctrl_outw((data & 0xfc03), PORT_PVCR); + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xfc03), PORT_PVCR); } - fcr_val |= SCFCR_MCE; } else { if (port->mapbase == 0xa4430000) { /* SCIF0 */ /* Clear PTCR bit 5-2; enable only tx and rx */ - data = ctrl_inw(PORT_PTCR); - ctrl_outw((data & 0xffc3), PORT_PTCR); + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xffc3), PORT_PTCR); } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ /* Clear PVCR bit 5-2 */ - data = ctrl_inw(PORT_PVCR); - ctrl_outw((data & 0xffc3), PORT_PVCR); + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xffc3), PORT_PVCR); } } - sci_out(port, SCFCR, fcr_val); } #elif defined(CONFIG_CPU_SH3) /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ -static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) { - unsigned int fcr_val = 0; unsigned short data; /* We need to set SCPCR to enable RTS/CTS */ - data = ctrl_inw(SCPCR); + data = __raw_readw(SCPCR); /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ - ctrl_outw(data & 0x0fcf, SCPCR); + __raw_writew(data & 0x0fcf, SCPCR); - if (cflag & CRTSCTS) - fcr_val |= SCFCR_MCE; - else { + if (!(cflag & CRTSCTS)) { /* We need to set SCPCR to enable RTS/CTS */ - data = ctrl_inw(SCPCR); + data = __raw_readw(SCPCR); /* Clear out SCP7MD1,0, SCP4MD1,0, Set SCP6MD1,0 = {01} (output) */ - ctrl_outw((data & 0x0fcf) | 0x1000, SCPCR); + __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); data = ctrl_inb(SCPDR); /* Set /RTS2 (bit6) = 0 */ ctrl_outb(data & 0xbf, SCPDR); } - - sci_out(port, SCFCR, fcr_val); } #elif defined(CONFIG_CPU_SUBTYPE_SH7722) -static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) { - unsigned int fcr_val = 0; unsigned short data; if (port->mapbase == 0xffe00000) { - data = ctrl_inw(PSCR); + data = __raw_readw(PSCR); data &= ~0x03cf; - if (cflag & CRTSCTS) - fcr_val |= SCFCR_MCE; - else + if (!(cflag & CRTSCTS)) data |= 0x0340; - ctrl_outw(data, PSCR); + __raw_writew(data, PSCR); } - /* SCIF1 and SCIF2 should be setup by board code */ - - sci_out(port, SCFCR, fcr_val); -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) -static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) -{ - /* Nothing to do here.. 
*/ - sci_out(port, SCFCR, 0); } -#else -/* For SH7750 */ -static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) -{ - unsigned int fcr_val = 0; - - if (cflag & CRTSCTS) { - fcr_val |= SCFCR_MCE; - } else { -#if defined(CONFIG_CPU_SUBTYPE_SH7343) || defined(CONFIG_CPU_SUBTYPE_SH7366) - /* Nothing */ #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SHX3) - ctrl_outw(0x0080, SCSPTR0); /* Set RTS = 1 */ +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + if (!(cflag & CRTSCTS)) + __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ +} +#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + if (!(cflag & CRTSCTS)) + __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ +} #else - ctrl_outw(0x0080, SCSPTR2); /* Set RTS = 1 */ -#endif - } - sci_out(port, SCFCR, fcr_val); +static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + /* Nothing to do */ } #endif @@ -419,18 +297,26 @@ static inline int scif_rxroom(struct uart_port *port) #elif defined(CONFIG_CPU_SUBTYPE_SH7763) static inline int scif_txroom(struct uart_port *port) { - if((port->mapbase == 0xffe00000) || (port->mapbase == 0xffe08000)) /* SCIF0/1*/ + if ((port->mapbase == 0xffe00000) || + (port->mapbase == 0xffe08000)) { + /* SCIF0/1*/ return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); - else /* SCIF2 */ + } else { + /* SCIF2 */ return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); + } } static inline int scif_rxroom(struct uart_port *port) { - if((port->mapbase == 0xffe00000) || (port->mapbase == 0xffe08000)) /* SCIF0/1*/ + if ((port->mapbase == 0xffe00000) || + (port->mapbase == 0xffe08000)) { + /* SCIF0/1*/ return sci_in(port, SCRFDR) & 0xff; - else /* SCIF2 */ + } else { + /* SCIF2 */ return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; + } } #else static inline int scif_txroom(struct uart_port *port) @@ -446,12 +332,12 @@ static inline int scif_rxroom(struct uart_port *port) static inline int sci_txroom(struct uart_port *port) { - return ((sci_in(port, SCxSR) & SCI_TDRE) != 0); + return (sci_in(port, SCxSR) & SCI_TDRE) != 0; } static inline int sci_rxroom(struct uart_port *port) { - return ((sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0); + return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; } /* ********************************************************************** * @@ -469,11 +355,10 @@ static void sci_transmit_chars(struct uart_port *port) status = sci_in(port, SCxSR); if (!(status & SCxSR_TDxE(port))) { ctrl = sci_in(port, SCSCR); - if (uart_circ_empty(xmit)) { + if (uart_circ_empty(xmit)) ctrl &= ~SCI_CTRL_FLAGS_TIE; - } else { + else ctrl |= SCI_CTRL_FLAGS_TIE; - } sci_out(port, SCSCR, ctrl); return; } @@ -521,11 +406,11 @@ static void sci_transmit_chars(struct uart_port *port) } /* On SH3, SCIF may read end-of-break as a space->mark char */ -#define STEPFN(c) ({int __c=(c); (((__c-1)|(__c)) == -1); }) +#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) static inline void sci_receive_chars(struct uart_port *port) { - struct sci_port *sci_port = (struct sci_port *)port; + struct sci_port *sci_port = to_sci_port(port); struct tty_struct *tty = port->info->port.tty; int i, count, copied = 0; unsigned short status; @@ -550,13 +435,13 @@ static inline void sci_receive_chars(struct uart_port *port) if (port->type == PORT_SCI) { char c = sci_in(port, SCxRDR); - if 
(uart_handle_sysrq_char(port, c) || sci_port->break_flag) + if (uart_handle_sysrq_char(port, c) || + sci_port->break_flag) count = 0; - else { + else tty_insert_flip_char(tty, c, TTY_NORMAL); - } } else { - for (i=0; i<count; i++) { + for (i = 0; i < count; i++) { char c = sci_in(port, SCxRDR); status = sci_in(port, SCxSR); #if defined(CONFIG_CPU_SH3) @@ -569,7 +454,7 @@ static inline void sci_receive_chars(struct uart_port *port) } /* Nonzero => end-of-break */ - pr_debug("scif: debounce<%02x>\n", c); + dev_dbg(port->dev, "debounce<%02x>\n", c); sci_port->break_flag = 0; if (STEPFN(c)) { @@ -586,12 +471,13 @@ static inline void sci_receive_chars(struct uart_port *port) /* Store data and status */ if (status&SCxSR_FER(port)) { flag = TTY_FRAME; - pr_debug("sci: frame error\n"); + dev_notice(port->dev, "frame error\n"); } else if (status&SCxSR_PER(port)) { flag = TTY_PARITY; - pr_debug("sci: parity error\n"); + dev_notice(port->dev, "parity error\n"); } else flag = TTY_NORMAL; + tty_insert_flip_char(tty, c, flag); } } @@ -651,13 +537,14 @@ static inline int sci_handle_errors(struct uart_port *port) /* overrun error */ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) copied++; - pr_debug("sci: overrun error\n"); + + dev_notice(port->dev, "overrun error"); } if (status & SCxSR_FER(port)) { if (sci_rxd_in(port) == 0) { /* Notify of BREAK */ - struct sci_port *sci_port = (struct sci_port *)port; + struct sci_port *sci_port = to_sci_port(port); if (!sci_port->break_flag) { sci_port->break_flag = 1; @@ -666,15 +553,19 @@ static inline int sci_handle_errors(struct uart_port *port) /* Do sysrq handling. */ if (uart_handle_break(port)) return 0; - pr_debug("sci: BREAK detected\n"); + + dev_dbg(port->dev, "BREAK detected\n"); + if (tty_insert_flip_char(tty, 0, TTY_BREAK)) - copied++; - } + copied++; + } + } else { /* frame error */ if (tty_insert_flip_char(tty, 0, TTY_FRAME)) copied++; - pr_debug("sci: frame error\n"); + + dev_notice(port->dev, "frame error\n"); } } @@ -682,7 +573,8 @@ static inline int sci_handle_errors(struct uart_port *port) /* parity error */ if (tty_insert_flip_char(tty, 0, TTY_PARITY)) copied++; - pr_debug("sci: parity error\n"); + + dev_notice(port->dev, "parity error"); } if (copied) @@ -691,6 +583,27 @@ static inline int sci_handle_errors(struct uart_port *port) return copied; } +static inline int sci_handle_fifo_overrun(struct uart_port *port) +{ + struct tty_struct *tty = port->info->port.tty; + int copied = 0; + + if (port->type != PORT_SCIF) + return 0; + + if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { + sci_out(port, SCLSR, 0); + + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + tty_flip_buffer_push(tty); + + dev_notice(port->dev, "overrun error\n"); + copied++; + } + + return copied; +} + static inline int sci_handle_breaks(struct uart_port *port) { int copied = 0; @@ -709,23 +622,15 @@ static inline int sci_handle_breaks(struct uart_port *port) /* Notify of BREAK */ if (tty_insert_flip_char(tty, 0, TTY_BREAK)) copied++; - pr_debug("sci: BREAK detected\n"); - } -#if defined(SCIF_ORER) - /* XXX: Handle SCIF overrun error */ - if (port->type != PORT_SCI && (sci_in(port, SCLSR) & SCIF_ORER) != 0) { - sci_out(port, SCLSR, 0); - if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) { - copied++; - pr_debug("sci: overrun error\n"); - } + dev_dbg(port->dev, "BREAK detected\n"); } -#endif if (copied) tty_flip_buffer_push(tty); + copied += sci_handle_fifo_overrun(port); + return copied; } @@ -763,16 +668,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr) sci_out(port, SCxSR, 
SCxSR_RDxF_CLEAR(port)); } } else { -#if defined(SCIF_ORER) - if((sci_in(port, SCLSR) & SCIF_ORER) != 0) { - struct tty_struct *tty = port->info->port.tty; - - sci_out(port, SCLSR, 0); - tty_insert_flip_char(tty, 0, TTY_OVERRUN); - tty_flip_buffer_push(tty); - pr_debug("scif: overrun error\n"); - } -#endif + sci_handle_fifo_overrun(port); sci_rx_interrupt(irq, ptr); } @@ -801,8 +697,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) struct uart_port *port = ptr; irqreturn_t ret = IRQ_NONE; - ssr_status = sci_in(port,SCxSR); - scr_status = sci_in(port,SCSCR); + ssr_status = sci_in(port, SCxSR); + scr_status = sci_in(port, SCSCR); /* Tx Interrupt */ if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE)) @@ -820,7 +716,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) return ret; } -#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK /* * Here we define a transistion notifier so that we can update all of our * ports' baud rate when the peripheral clock changes. @@ -828,41 +724,20 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) static int sci_notifier(struct notifier_block *self, unsigned long phase, void *p) { - struct cpufreq_freqs *freqs = p; int i; if ((phase == CPUFREQ_POSTCHANGE) || - (phase == CPUFREQ_RESUMECHANGE)){ + (phase == CPUFREQ_RESUMECHANGE)) for (i = 0; i < SCI_NPORTS; i++) { - struct uart_port *port = &sci_ports[i].port; - struct clk *clk; - - /* - * Update the uartclk per-port if frequency has - * changed, since it will no longer necessarily be - * consistent with the old frequency. - * - * Really we want to be able to do something like - * uart_change_speed() or something along those lines - * here to implicitly reset the per-port baud rate.. - * - * Clean this up later.. 
- */ - clk = clk_get(NULL, "module_clk"); - port->uartclk = clk_get_rate(clk); - clk_put(clk); + struct sci_port *s = &sci_ports[i]; + s->port.uartclk = clk_get_rate(s->clk); } - printk(KERN_INFO "%s: got a postchange notification " - "for cpu %d (old %d, new %d)\n", - __func__, freqs->cpu, freqs->old, freqs->new); - } - return NOTIFY_OK; } static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 }; -#endif /* CONFIG_CPU_FREQ && CONFIG_HAVE_CLK */ +#endif static int sci_request_irq(struct sci_port *port) { @@ -875,23 +750,22 @@ static int sci_request_irq(struct sci_port *port) "SCI Transmit Data Empty", "SCI Break" }; if (port->irqs[0] == port->irqs[1]) { - if (!port->irqs[0]) { - printk(KERN_ERR "sci: Cannot allocate irq.(IRQ=0)\n"); + if (unlikely(!port->irqs[0])) return -ENODEV; - } if (request_irq(port->irqs[0], sci_mpxed_interrupt, IRQF_DISABLED, "sci", port)) { - printk(KERN_ERR "sci: Cannot allocate irq.\n"); + dev_err(port->port.dev, "Can't allocate IRQ\n"); return -ENODEV; } } else { for (i = 0; i < ARRAY_SIZE(handlers); i++) { - if (!port->irqs[i]) + if (unlikely(!port->irqs[i])) continue; + if (request_irq(port->irqs[i], handlers[i], IRQF_DISABLED, desc[i], port)) { - printk(KERN_ERR "sci: Cannot allocate irq.\n"); + dev_err(port->port.dev, "Can't allocate IRQ\n"); return -ENODEV; } } @@ -904,12 +778,9 @@ static void sci_free_irq(struct sci_port *port) { int i; - if (port->irqs[0] == port->irqs[1]) { - if (!port->irqs[0]) - printk("sci: sci_free_irq error\n"); - else - free_irq(port->irqs[0], port); - } else { + if (port->irqs[0] == port->irqs[1]) + free_irq(port->irqs[0], port); + else { for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { if (!port->irqs[i]) continue; @@ -1028,7 +899,6 @@ static void sci_shutdown(struct uart_port *port) static void sci_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { - struct sci_port *s = &sci_ports[port->line]; unsigned int status, baud, smr_val; int t = -1; @@ -1060,32 +930,36 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_out(port, SCSMR, smr_val); if (t > 0) { - if(t >= 256) { + if (t >= 256) { sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); t >>= 2; - } else { + } else sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); - } + sci_out(port, SCBRR, t); udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ } - if (likely(s->init_pins)) - s->init_pins(port, termios->c_cflag); + sci_init_pins(port, termios->c_cflag); + sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? 
SCFCR_MCE : 0); sci_out(port, SCSCR, SCSCR_INIT(port)); if ((termios->c_cflag & CREAD) != 0) - sci_start_rx(port,0); + sci_start_rx(port, 0); } static const char *sci_type(struct uart_port *port) { switch (port->type) { - case PORT_SCI: return "sci"; - case PORT_SCIF: return "scif"; - case PORT_IRDA: return "irda"; - case PORT_SCIFA: return "scifa"; + case PORT_IRDA: + return "irda"; + case PORT_SCI: + return "sci"; + case PORT_SCIF: + return "scif"; + case PORT_SCIFA: + return "scifa"; } return NULL; @@ -1108,19 +982,6 @@ static void sci_config_port(struct uart_port *port, int flags) port->type = s->type; - switch (port->type) { - case PORT_SCI: - s->init_pins = sci_init_pins_sci; - break; - case PORT_SCIF: - case PORT_SCIFA: - s->init_pins = sci_init_pins_scif; - break; - case PORT_IRDA: - s->init_pins = sci_init_pins_irda; - break; - } - if (port->flags & UPF_IOREMAP && !port->membase) { #if defined(CONFIG_SUPERH64) port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); @@ -1129,7 +990,7 @@ static void sci_config_port(struct uart_port *port, int flags) port->membase = ioremap_nocache(port->mapbase, 0x40); #endif - printk(KERN_ERR "sci: can't remap port#%d\n", port->line); + dev_err(port->dev, "can't remap port#%d\n", port->line); } } @@ -1163,6 +1024,10 @@ static struct uart_ops sci_uart_ops = { .request_port = sci_request_port, .config_port = sci_config_port, .verify_port = sci_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = sci_poll_get_char, + .poll_put_char = sci_poll_put_char, +#endif }; static void __init sci_init_ports(void) @@ -1229,7 +1094,15 @@ int __init early_sci_setup(struct uart_port *port) static void serial_console_write(struct console *co, const char *s, unsigned count) { - put_string(serial_console_port, s, count); + struct uart_port *port = &serial_console_port->port; + int i; + + for (i = 0; i < count; i++) { + if (*s == 10) + sci_poll_put_char(port, '\r'); + + sci_poll_put_char(port, *s++); + } } static int __init serial_console_setup(struct console *co, char *options) @@ -1307,89 +1180,8 @@ static int __init sci_console_init(void) console_initcall(sci_console_init); #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ -#ifdef CONFIG_SH_KGDB_CONSOLE -/* - * FIXME: Most of this can go away.. at the moment, we rely on - * arch/sh/kernel/setup.c to do the command line parsing for kgdb, though - * most of that can easily be done here instead. - * - * For the time being, just accept the values that were parsed earlier.. - */ -static void __init kgdb_console_get_options(struct uart_port *port, int *baud, - int *parity, int *bits) -{ - *baud = kgdb_baud; - *parity = tolower(kgdb_parity); - *bits = kgdb_bits - '0'; -} - -/* - * The naming here is somewhat misleading, since kgdb_console_setup() takes - * care of the early-on initialization for kgdb, regardless of whether we - * actually use kgdb as a console or not. - * - * On the plus side, this lets us kill off the old kgdb_sci_setup() nonsense. - */ -int __init kgdb_console_setup(struct console *co, char *options) -{ - struct uart_port *port = &sci_ports[kgdb_portnum].port; - int baud = 38400; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - if (co->index != kgdb_portnum) - co->index = kgdb_portnum; - - kgdb_sci_port = &sci_ports[co->index]; - port = &kgdb_sci_port->port; - - /* - * Also need to check port->type, we don't actually have any - * UPIO_PORT ports, but uart_report_port() handily misreports - * it anyways if we don't have a port available by the time this is - * called. 
- */ - if (!port->type) - return -ENODEV; - if (!port->membase || !port->mapbase) - return -ENODEV; - - if (options) - uart_parse_options(options, &baud, &parity, &bits, &flow); - else - kgdb_console_get_options(port, &baud, &parity, &bits); - - kgdb_getchar = kgdb_sci_getchar; - kgdb_putchar = kgdb_sci_putchar; - - return uart_set_options(port, co, baud, parity, bits, flow); -} - -static struct console kgdb_console = { - .name = "ttySC", - .device = uart_console_device, - .write = kgdb_console_write, - .setup = kgdb_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &sci_uart_driver, -}; - -/* Register the KGDB console so we get messages (d'oh!) */ -static int __init kgdb_console_init(void) -{ - sci_init_ports(); - register_console(&kgdb_console); - return 0; -} -console_initcall(kgdb_console_init); -#endif /* CONFIG_SH_KGDB_CONSOLE */ - -#if defined(CONFIG_SH_KGDB_CONSOLE) -#define SCI_CONSOLE &kgdb_console -#elif defined(CONFIG_SERIAL_SH_SCI_CONSOLE) -#define SCI_CONSOLE &serial_console +#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) +#define SCI_CONSOLE (&serial_console) #else #define SCI_CONSOLE 0 #endif @@ -1463,15 +1255,8 @@ static int __devinit sci_probe(struct platform_device *dev) uart_add_one_port(&sci_uart_driver, &sciport->port); } -#if defined(CONFIG_SH_KGDB) && !defined(CONFIG_SH_KGDB_CONSOLE) - kgdb_sci_port = &sci_ports[kgdb_portnum]; - kgdb_getchar = kgdb_sci_getchar; - kgdb_putchar = kgdb_sci_putchar; -#endif - -#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); - dev_info(&dev->dev, "CPU frequency notifier registered\n"); #endif #ifdef CONFIG_SH_STANDARD_BIOS @@ -1491,6 +1276,10 @@ static int __devexit sci_remove(struct platform_device *dev) { int i; +#ifdef CONFIG_HAVE_CLK + cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); +#endif + for (i = 0; i < SCI_NPORTS; i++) uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port); diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 9f33b064172..38c600c0dbb 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -133,13 +133,20 @@ # define SCSPTR5 0xffef0024 /* 16 bit SCIF */ # define SCIF_OPER 0x0001 /* Overrun error bit */ # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \ +#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ + defined(CONFIG_CPU_SUBTYPE_SH7203) || \ defined(CONFIG_CPU_SUBTYPE_SH7206) || \ defined(CONFIG_CPU_SUBTYPE_SH7263) # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ # define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ # define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ # define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ +# if defined(CONFIG_CPU_SUBTYPE_SH7201) +# define SCSPTR4 0xfffeA020 /* 16 bit SCIF */ +# define SCSPTR5 0xfffeA820 /* 16 bit SCIF */ +# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */ +# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */ +# endif # define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ #elif defined(CONFIG_CPU_SUBTYPE_SH7619) # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ @@ -225,6 +232,10 @@ # define SCIF_TXROOM_MAX 16 #endif +#ifndef SCIF_ORER +#define SCIF_ORER 0x0000 +#endif + #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) #define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) @@ -232,12 +243,7 @@ #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? 
SCI_FER : SCIF_FER) #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) - -#if defined(CONFIG_CPU_SUBTYPE_SH7705) -# define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) -#else -# define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : 0x0000) -#endif +#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ @@ -501,18 +507,6 @@ static inline int sci_rxd_in(struct uart_port *port) { return sci_in(port,SCxSR)&0x0010 ? 1 : 0; } -static inline void set_sh771x_scif_pfc(struct uart_port *port) -{ - if (port->mapbase == 0xA4400000){ - ctrl_outw(ctrl_inw(PACR)&0xffc0,PACR); - ctrl_outw(ctrl_inw(PBCR)&0x0fff,PBCR); - return; - } - if (port->mapbase == 0xA4410000){ - ctrl_outw(ctrl_inw(PBCR)&0xf003,PBCR); - return; - } -} #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) static inline int sci_rxd_in(struct uart_port *port) @@ -664,7 +658,8 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \ +#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ + defined(CONFIG_CPU_SUBTYPE_SH7203) || \ defined(CONFIG_CPU_SUBTYPE_SH7206) || \ defined(CONFIG_CPU_SUBTYPE_SH7263) static inline int sci_rxd_in(struct uart_port *port) @@ -677,6 +672,16 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ if (port->mapbase == 0xfffe9800) return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ +#if defined(CONFIG_CPU_SUBTYPE_SH7201) + if (port->mapbase == 0xfffeA000) + return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xfffeA800) + return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xfffeB000) + return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xfffeB800) + return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ +#endif return 1; } #elif defined(CONFIG_CPU_SUBTYPE_SH7619) diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index d1812d32f47..63f0de29aa1 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -827,7 +827,7 @@ static int __init maple_bus_init(void) maple_queue_cache = kmem_cache_create("maple_queue_cache", 0x400, 0, - SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL); + SLAB_HWCACHE_ALIGN, NULL); if (!maple_queue_cache) goto cleanup_bothirqs; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index c95b286a123..5d457c96bd7 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -22,6 +22,8 @@ menuconfig STAGING If in doubt, say N here. 
+if STAGING + config STAGING_EXCLUDE_BUILD bool "Exclude Staging drivers from being built" if STAGING default y @@ -62,3 +64,4 @@ source "drivers/staging/at76_usb/Kconfig" source "drivers/staging/poch/Kconfig" endif # !STAGING_EXCLUDE_BUILD +endif # STAGING diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 8e74657f106..43a863c5cc4 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -51,6 +51,7 @@ static struct usb_device_id usbtmc_devices[] = { { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, { 0, } /* terminating entry */ }; +MODULE_DEVICE_TABLE(usb, usbtmc_devices); /* * This structure is the capabilities for the device diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 2bccefebff1..aa79280df15 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -574,6 +574,7 @@ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct dev_state *ps; + const struct cred *cred = current_cred(); int ret; lock_kernel(); @@ -617,8 +618,8 @@ static int usbdev_open(struct inode *inode, struct file *file) init_waitqueue_head(&ps->wait); ps->discsignr = 0; ps->disc_pid = get_pid(task_pid(current)); - ps->disc_uid = current->uid; - ps->disc_euid = current->euid; + ps->disc_uid = cred->uid; + ps->disc_euid = cred->euid; ps->disccontext = NULL; ps->ifclaimed = 0; security_task_getsecid(current, &ps->secid); @@ -967,6 +968,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, struct usb_host_endpoint *ep; struct async *as; struct usb_ctrlrequest *dr = NULL; + const struct cred *cred = current_cred(); unsigned int u, totlen, isofrmlen; int ret, ifnum = -1; int is_in; @@ -1174,8 +1176,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); - as->uid = current->uid; - as->euid = current->euid; + as->uid = cred->uid; + as->euid = cred->euid; security_task_getsecid(current, &as->secid); if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 3d7793d9303..8c081308b0e 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -279,7 +279,9 @@ static int usb_unbind_interface(struct device *dev) * altsetting means creating new endpoint device entries). * When either of these happens, defer the Set-Interface. */ - if (!error && intf->dev.power.status == DPM_ON) + if (intf->cur_altsetting->desc.bAlternateSetting == 0) + ; /* Already in altsetting 0 so skip Set-Interface */ + else if (!error && intf->dev.power.status == DPM_ON) usb_set_interface(udev, intf->altsetting[0]. 
desc.bInterfaceNumber, 0); else diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 94632264dcc..185be760833 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -277,8 +277,8 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de if (inode) { inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 428b5993575..3a8bb53fc47 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -651,6 +651,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) fs_in_desc.bEndpointAddress; hs_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress; + hs_notify_desc.bEndpointAddress = + fs_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(eth_hs_function); @@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) f->hs_descriptors, &hs_in_desc); rndis->hs.out = usb_find_endpoint(eth_hs_function, f->hs_descriptors, &hs_out_desc); + rndis->hs.notify = usb_find_endpoint(eth_hs_function, + f->hs_descriptors, &hs_notify_desc); } rndis->port.open = rndis_open; diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 77b44fb48f0..3a8879ec206 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -623,7 +623,6 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) #if defined(CONFIG_SUPERH_BUILT_IN_M66592) static void init_controller(struct m66592 *m66592) { - usbf_start_clock(); m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); @@ -671,9 +670,7 @@ static void init_controller(struct m66592 *m66592) static void disable_controller(struct m66592 *m66592) { -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - usbf_stop_clock(); -#else +#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); udelay(1); m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); @@ -686,9 +683,7 @@ static void disable_controller(struct m66592 *m66592) static void m66592_start_xclock(struct m66592 *m66592) { -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - usbf_start_clock(); -#else +#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) u16 tmp; tmp = m66592_read(m66592, M66592_SYSCFG); @@ -1539,7 +1534,10 @@ static int __exit m66592_remove(struct platform_device *pdev) iounmap(m66592->reg); free_irq(platform_get_irq(pdev, 0), m66592); m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); - usbf_stop_clock(); +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) + clk_disable(m66592->clk); + clk_put(m66592->clk); +#endif kfree(m66592); return 0; } @@ -1556,6 +1554,9 @@ static int __init m66592_probe(struct platform_device *pdev) int irq; void __iomem *reg = NULL; struct m66592 *m66592 = NULL; +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) + char clk_name[8]; +#endif int ret = 0; int i; @@ -1614,6 +1615,16 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) + snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); + 
m66592->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(m66592->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(m66592->clk); + goto clean_up2; + } + clk_enable(m66592->clk); +#endif INIT_LIST_HEAD(&m66592->gadget.ep_list); m66592->gadget.ep0 = &m66592->ep[0].ep; INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list); @@ -1645,7 +1656,7 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); if (m66592->ep0_req == NULL) - goto clean_up2; + goto clean_up3; m66592->ep0_req->complete = nop_completion; init_controller(m66592); @@ -1653,7 +1664,12 @@ static int __init m66592_probe(struct platform_device *pdev) dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); return 0; +clean_up3: +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) + clk_disable(m66592->clk); + clk_put(m66592->clk); clean_up2: +#endif free_irq(irq, m66592); clean_up: if (m66592) { diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h index f118f00f146..286ce07e796 100644 --- a/drivers/usb/gadget/m66592-udc.h +++ b/drivers/usb/gadget/m66592-udc.h @@ -23,6 +23,10 @@ #ifndef __M66592_UDC_H__ #define __M66592_UDC_H__ +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#include <linux/clk.h> +#endif + #define M66592_SYSCFG 0x00 #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ #define M66592_XTAL48 0x8000 /* 48MHz */ @@ -476,6 +480,9 @@ struct m66592_ep { struct m66592 { spinlock_t lock; void __iomem *reg; +#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) + struct clk *clk; +#endif struct usb_gadget gadget; struct usb_gadget_driver *driver; @@ -604,26 +611,6 @@ static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, #define m66592_bset(m66592, val, offset) \ m66592_mdfy(m66592, val, 0, offset) -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) -#include <asm/io.h> -#define MSTPCR2 0xA4150038 /* for SH7722 */ -#define MSTPCR2_USB 0x00000800 - -static inline void usbf_start_clock(void) -{ - ctrl_outl(ctrl_inl(MSTPCR2) & ~MSTPCR2_USB, MSTPCR2); -} - -static inline void usbf_stop_clock(void) -{ - ctrl_outl(ctrl_inl(MSTPCR2) | MSTPCR2_USB, MSTPCR2); -} - -#else -#define usbf_start_clock(x) -#define usbf_stop_clock(x) -#endif /* if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ - #endif /* ifndef __M66592_UDC_H__ */ diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 2376f24f3c8..c21f14e0666 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -114,6 +114,9 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) int i = 0; #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) +#if defined(CONFIG_HAVE_CLK) + clk_enable(r8a66597->clk); +#endif do { r8a66597_write(r8a66597, SCKE, SYSCFG0); tmp = r8a66597_read(r8a66597, SYSCFG0); @@ -154,7 +157,11 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597) { r8a66597_bclr(r8a66597, SCKE, SYSCFG0); udelay(1); -#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597) +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) +#if defined(CONFIG_HAVE_CLK) + clk_disable(r8a66597->clk); +#endif +#else r8a66597_bclr(r8a66597, PLLC, SYSCFG0); r8a66597_bclr(r8a66597, XCKE, SYSCFG0); r8a66597_bclr(r8a66597, USBE, SYSCFG0); @@ -2261,6 +2268,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) del_timer_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); iounmap((void *)r8a66597->reg); +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) 
&& defined(CONFIG_HAVE_CLK) + clk_put(r8a66597->clk); +#endif usb_put_hcd(hcd); return 0; } @@ -2268,6 +2278,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) #define resource_len(r) (((r)->end - (r)->start) + 1) static int __init r8a66597_probe(struct platform_device *pdev) { +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) + char clk_name[8]; +#endif struct resource *res = NULL, *ires; int irq = -1; void __iomem *reg = NULL; @@ -2320,6 +2333,16 @@ static int __init r8a66597_probe(struct platform_device *pdev) memset(r8a66597, 0, sizeof(struct r8a66597)); dev_set_drvdata(&pdev->dev, r8a66597); +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) + snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); + r8a66597->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(r8a66597->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(r8a66597->clk); + goto clean_up2; + } +#endif + spin_lock_init(&r8a66597->lock); init_timer(&r8a66597->rh_timer); r8a66597->rh_timer.function = r8a66597_timer; @@ -2365,11 +2388,18 @@ static int __init r8a66597_probe(struct platform_device *pdev) ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); if (ret != 0) { dev_err(&pdev->dev, "Failed to add hcd\n"); - goto clean_up; + goto clean_up3; } return 0; +clean_up3: +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) + clk_put(r8a66597->clk); +clean_up2: +#endif + usb_put_hcd(hcd); + clean_up: if (reg) iounmap(reg); diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 84ee0141731..ecacde4d69b 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -26,6 +26,10 @@ #ifndef __R8A66597_H__ #define __R8A66597_H__ +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#include <linux/clk.h> +#endif + #define SYSCFG0 0x00 #define SYSCFG1 0x02 #define SYSSTS0 0x04 @@ -481,7 +485,9 @@ struct r8a66597_root_hub { struct r8a66597 { spinlock_t lock; unsigned long reg; - +#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) + struct clk *clk; +#endif struct r8a66597_device device0; struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB]; struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE]; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index aad1359a3eb..fb6f2933b01 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -143,6 +143,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, @@ -166,6 +167,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 07a3992abad..373ee09975b 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -40,6 +40,9 @@ /* AlphaMicro Components 
AMC-232USB01 device */ #define FTDI_AMC232_PID 0xFF00 /* Product Id */ +/* www.candapter.com Ewert Energy Systems CANdapter device */ +#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ + /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ /* the VID is the standard ftdi vid (FTDI_VID) */ #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ @@ -75,6 +78,9 @@ /* OpenDCC (www.opendcc.de) product id */ #define FTDI_OPENDCC_PID 0xBFD8 +/* Sprog II (Andrew Crosland's SprogII DCC interface) */ +#define FTDI_SPROG_II 0xF0C8 + /* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */ /* they use the ftdi chipset for the USB interface and the vendor id is the same */ #define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 491c8857b64..1aed584be5e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -91,6 +91,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, + { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a3bd039c78e..54974f446a8 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -110,3 +110,11 @@ /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ #define YCCABLE_VENDOR_ID 0x05ad #define YCCABLE_PRODUCT_ID 0x0fba + +/* "Superial" USB - Serial */ +#define SUPERIAL_VENDOR_ID 0x5372 +#define SUPERIAL_PRODUCT_ID 0x2303 + +/* Hewlett-Packard LD220-HP POS Pole Display */ +#define HP_VENDOR_ID 0x03f0 +#define HP_LD220_PRODUCT_ID 0x3524 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 31c42d1cae1..01d0c70d60e 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -16,56 +16,6 @@ * For questions or problems with this driver, contact Texas Instruments * technical support, or Al Borchers <alborchers@steinerpoint.com>, or * Peter Berger <pberger@brimson.com>. - * - * This driver needs this hotplug script in /etc/hotplug/usb/ti_usb_3410_5052 - * or in /etc/hotplug.d/usb/ti_usb_3410_5052.hotplug to set the device - * configuration. 
- * - * #!/bin/bash - * - * BOOT_CONFIG=1 - * ACTIVE_CONFIG=2 - * - * if [[ "$ACTION" != "add" ]] - * then - * exit - * fi - * - * CONFIG_PATH=/sys${DEVPATH%/?*}/bConfigurationValue - * - * if [[ 0`cat $CONFIG_PATH` -ne $BOOT_CONFIG ]] - * then - * exit - * fi - * - * PRODUCT=${PRODUCT%/?*} # delete version - * VENDOR_ID=`printf "%d" 0x${PRODUCT%/?*}` - * PRODUCT_ID=`printf "%d" 0x${PRODUCT#*?/}` - * - * PARAM_PATH=/sys/module/ti_usb_3410_5052/parameters - * - * function scan() { - * s=$1 - * shift - * for i - * do - * if [[ $s -eq $i ]] - * then - * return 0 - * fi - * done - * return 1 - * } - * - * IFS=$IFS, - * - * if (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_3410` && - * scan $PRODUCT_ID 13328 `cat $PARAM_PATH/product_3410`) || - * (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_5052` && - * scan $PRODUCT_ID 20562 20818 20570 20575 `cat $PARAM_PATH/product_5052`) - * then - * echo $ACTIVE_CONFIG > $CONFIG_PATH - * fi */ #include <linux/kernel.h> @@ -457,9 +407,10 @@ static int ti_startup(struct usb_serial *serial) goto free_tdev; } - /* the second configuration must be set (in sysfs by hotplug script) */ + /* the second configuration must be set */ if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { - status = -ENODEV; + status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG); + status = status ? status : -ENODEV; goto free_tdev; } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index e61f2bfc64a..bfcc1fe8251 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -167,8 +167,22 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Ozan Sener <themgzzy@gmail.com> */ +UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551, + "Nokia", + "3500c", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + +/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */ +UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601, + "Nokia", + "Nokia 3109c", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Patch for Nokia 5310 capacity */ -UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591, +UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701, "Nokia", "5310", US_SC_DEVICE, US_PR_DEVICE, NULL, diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 3f3ce13fef4..d0c821992a9 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1889,10 +1889,11 @@ config FB_W100 config FB_SH_MOBILE_LCDC tristate "SuperH Mobile LCDC framebuffer support" depends on FB && SUPERH - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - default m + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS + select FB_DEFERRED_IO ---help--- Frame buffer driver for the on-chip SH-Mobile LCD controller. @@ -2021,17 +2022,19 @@ config FB_COBALT depends on FB && MIPS_COBALT config FB_SH7760 - bool "SH7760/SH7763 LCDC support" - depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763) - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - Support for the SH7760/SH7763 integrated (D)STN/TFT LCD Controller. - Supports display resolutions up to 1024x1024 pixel, grayscale and - color operation, with depths ranging from 1 bpp to 8 bpp monochrome - and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for - panels <= 320 pixel horizontal resolution. 
+ bool "SH7760/SH7763/SH7720/SH7721 LCDC support" + depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \ + || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721) + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + ---help--- + Support for the SH7760/SH7763/SH7720/SH7721 integrated + (D)STN/TFT LCD Controller. + Supports display resolutions up to 1024x1024 pixel, grayscale and + color operation, with depths ranging from 1 bpp to 8 bpp monochrome + and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for + panels <= 320 pixel horizontal resolution. config FB_VIRTUAL tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 4835bdc4e9f..082026546ae 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -24,6 +24,19 @@ #include <linux/rmap.h> #include <linux/pagemap.h> +struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) +{ + void *screen_base = (void __force *) info->screen_base; + struct page *page; + + if (is_vmalloc_addr(screen_base + offs)) + page = vmalloc_to_page(screen_base + offs); + else + page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); + + return page; +} + /* this is to find and return the vmalloc-ed fb pages */ static int fb_deferred_io_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -31,14 +44,12 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, unsigned long offset; struct page *page; struct fb_info *info = vma->vm_private_data; - /* info->screen_base is virtual memory */ - void *screen_base = (void __force *) info->screen_base; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= info->fix.smem_len) return VM_FAULT_SIGBUS; - page = vmalloc_to_page(screen_base + offset); + page = fb_deferred_io_page(info, offset); if (!page) return VM_FAULT_SIGBUS; @@ -60,6 +71,10 @@ int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) { struct fb_info *info = file->private_data; + /* Skip if deferred io is complied-in but disabled on this fbdev */ + if (!info->fbdefio) + return 0; + /* Kill off the delayed work */ cancel_rearming_delayed_work(&info->deferred_work); @@ -184,7 +199,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open); void fb_deferred_io_cleanup(struct fb_info *info) { - void *screen_base = (void __force *) info->screen_base; struct fb_deferred_io *fbdefio = info->fbdefio; struct page *page; int i; @@ -195,9 +209,12 @@ void fb_deferred_io_cleanup(struct fb_info *info) /* clear out the mapping that we setup */ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { - page = vmalloc_to_page(screen_base + i); + page = fb_deferred_io_page(info, i); page->mapping = NULL; } + + info->fbops->fb_mmap = NULL; + mutex_destroy(&fbdefio->lock); } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index 8d0212da451..653bdfee305 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c @@ -13,6 +13,8 @@ * * Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de> * for his original source and testing! 
+ * + * sh7760_setcolreg get from drivers/video/sh_mobile_lcdcfb.c */ #include <linux/completion.h> @@ -53,29 +55,6 @@ static irqreturn_t sh7760fb_irq(int irq, void *data) return IRQ_HANDLED; } -static void sh7760fb_wait_vsync(struct fb_info *info) -{ - struct sh7760fb_par *par = info->par; - - if (par->pd->novsync) - return; - - iowrite16(ioread16(par->base + LDINTR) & ~VINT_CHECK, - par->base + LDINTR); - - if (par->irq < 0) { - /* poll for vert. retrace: status bit is sticky */ - while (!(ioread16(par->base + LDINTR) & VINT_CHECK)) - cpu_relax(); - } else { - /* a "wait_for_irq_event(par->irq)" would be extremely nice */ - init_completion(&par->vsync); - enable_irq(par->irq); - wait_for_completion(&par->vsync); - disable_irq_nosync(par->irq); - } -} - /* wait_for_lps - wait until power supply has reached a certain state. */ static int wait_for_lps(struct sh7760fb_par *par, int val) { @@ -117,55 +96,28 @@ static int sh7760fb_blank(int blank, struct fb_info *info) return wait_for_lps(par, lps); } -/* set color registers */ -static int sh7760fb_setcmap(struct fb_cmap *cmap, struct fb_info *info) +static int sh7760_setcolreg (u_int regno, + u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) { - struct sh7760fb_par *par = info->par; - u32 s = cmap->start; - u32 l = cmap->len; - u16 *r = cmap->red; - u16 *g = cmap->green; - u16 *b = cmap->blue; - u32 col, tmo; - int ret; + u32 *palette = info->pseudo_palette; - ret = 0; + if (regno >= 16) + return -EINVAL; - sh7760fb_wait_vsync(info); + /* only FB_VISUAL_TRUECOLOR supported */ - /* request palette access */ - iowrite16(LDPALCR_PALEN, par->base + LDPALCR); + red >>= 16 - info->var.red.length; + green >>= 16 - info->var.green.length; + blue >>= 16 - info->var.blue.length; + transp >>= 16 - info->var.transp.length; - /* poll for access grant */ - tmo = 100; - while (!(ioread16(par->base + LDPALCR) & LDPALCR_PALS) && (--tmo)) - cpu_relax(); + palette[regno] = (red << info->var.red.offset) | + (green << info->var.green.offset) | + (blue << info->var.blue.offset) | + (transp << info->var.transp.offset); - if (!tmo) { - ret = 1; - dev_dbg(info->dev, "no palette access!\n"); - goto out; - } - - while (l && (s < 256)) { - col = ((*r) & 0xff) << 16; - col |= ((*g) & 0xff) << 8; - col |= ((*b) & 0xff); - col &= SH7760FB_PALETTE_MASK; - iowrite32(col, par->base + LDPR(s)); - - if (s < 16) - ((u32 *) (info->pseudo_palette))[s] = s; - - s++; - l--; - r++; - g++; - b++; - } -out: - iowrite16(0, par->base + LDPALCR); - return ret; + return 0; } static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info, @@ -406,7 +358,7 @@ static struct fb_ops sh7760fb_ops = { .owner = THIS_MODULE, .fb_blank = sh7760fb_blank, .fb_check_var = sh7760fb_check_var, - .fb_setcmap = sh7760fb_setcmap, + .fb_setcolreg = sh7760_setcolreg, .fb_set_par = sh7760fb_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index efff672fd7b..0e2b8fd24df 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -16,7 +16,9 @@ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/interrupt.h> #include <video/sh_mobile_lcdc.h> +#include <asm/atomic.h> #define PALETTE_NR 16 @@ -30,11 +32,15 @@ struct sh_mobile_lcdc_chan { u32 pseudo_palette[PALETTE_NR]; struct fb_info info; dma_addr_t dma_handle; + struct fb_deferred_io defio; }; struct sh_mobile_lcdc_priv { void __iomem *base; + int 
irq; #ifdef CONFIG_HAVE_CLK + atomic_t clk_usecnt; + struct clk *dot_clk; struct clk *clk; #endif unsigned long lddckr; @@ -56,7 +62,7 @@ struct sh_mobile_lcdc_priv { /* per-channel registers */ enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R, - LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR }; + LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR }; static unsigned long lcdc_offs_mainlcd[] = { [LDDCKPAT1R] = 0x400, @@ -66,6 +72,7 @@ static unsigned long lcdc_offs_mainlcd[] = { [LDMT3R] = 0x420, [LDDFR] = 0x424, [LDSM1R] = 0x428, + [LDSM2R] = 0x42c, [LDSA1R] = 0x430, [LDMLSR] = 0x438, [LDHCNR] = 0x448, @@ -83,6 +90,7 @@ static unsigned long lcdc_offs_sublcd[] = { [LDMT3R] = 0x608, [LDDFR] = 0x60c, [LDSM1R] = 0x610, + [LDSM2R] = 0x614, [LDSA1R] = 0x618, [LDMLSR] = 0x620, [LDHCNR] = 0x624, @@ -96,6 +104,8 @@ static unsigned long lcdc_offs_sublcd[] = { #define LCDC_RESET 0x00000100 #define DISPLAY_BEU 0x00000008 #define LCDC_ENABLE 0x00000001 +#define LDINTR_FE 0x00000400 +#define LDINTR_FS 0x00000004 static void lcdc_write_chan(struct sh_mobile_lcdc_chan *chan, int reg_nr, unsigned long data) @@ -170,6 +180,65 @@ struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = { lcdc_sys_read_data, }; +#ifdef CONFIG_HAVE_CLK +static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) +{ + if (atomic_inc_and_test(&priv->clk_usecnt)) { + clk_enable(priv->clk); + if (priv->dot_clk) + clk_enable(priv->dot_clk); + } +} + +static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) +{ + if (atomic_sub_return(1, &priv->clk_usecnt) == -1) { + if (priv->dot_clk) + clk_disable(priv->dot_clk); + clk_disable(priv->clk); + } +} +#else +static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {} +static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} +#endif + +static void sh_mobile_lcdc_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ + struct sh_mobile_lcdc_chan *ch = info->par; + + /* enable clocks before accessing hardware */ + sh_mobile_lcdc_clk_on(ch->lcdc); + + /* trigger panel update */ + lcdc_write_chan(ch, LDSM2R, 1); +} + +static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + + if (fbdefio) + schedule_delayed_work(&info->deferred_work, fbdefio->delay); +} + +static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) +{ + struct sh_mobile_lcdc_priv *priv = data; + unsigned long tmp; + + /* acknowledge interrupt */ + tmp = lcdc_read(priv, _LDINTR); + tmp &= 0xffffff00; /* mask in high 24 bits */ + tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */ + lcdc_write(priv, _LDINTR, tmp); + + /* disable clocks */ + sh_mobile_lcdc_clk_off(priv); + return IRQ_HANDLED; +} + static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv, int start) { @@ -207,6 +276,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) int k, m; int ret = 0; + /* enable clocks before accessing the hardware */ + for (k = 0; k < ARRAY_SIZE(priv->ch); k++) + if (priv->ch[k].enabled) + sh_mobile_lcdc_clk_on(priv); + /* reset */ lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET); lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0); @@ -249,7 +323,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) lcdc_write(priv, _LDDCKSTPR, 0); lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0); - /* interrupts are disabled */ + /* interrupts are disabled to begin with */ lcdc_write(priv, _LDINTR, 0); for (k = 0; k < 
ARRAY_SIZE(priv->ch); k++) { @@ -310,9 +384,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) return ret; } - /* --- display_lcdc_data() --- */ - lcdc_write(priv, _LDINTR, 0x00000f00); - /* word and long word swap */ lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6); @@ -334,8 +405,24 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* set line size */ lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length); - /* continuous read mode */ - lcdc_write_chan(ch, LDSM1R, 0); + /* setup deferred io if SYS bus */ + tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; + if (ch->ldmt1r_value & (1 << 12) && tmp) { + ch->defio.deferred_io = sh_mobile_lcdc_deferred_io; + ch->defio.delay = msecs_to_jiffies(tmp); + ch->info.fbdefio = &ch->defio; + fb_deferred_io_init(&ch->info); + + /* one-shot mode */ + lcdc_write_chan(ch, LDSM1R, 1); + + /* enable "Frame End Interrupt Enable" bit */ + lcdc_write(priv, _LDINTR, LDINTR_FE); + + } else { + /* continuous read mode */ + lcdc_write_chan(ch, LDSM1R, 0); + } } /* display output */ @@ -359,6 +446,7 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) { struct sh_mobile_lcdc_chan *ch; struct sh_mobile_lcdc_board_cfg *board_cfg; + unsigned long tmp; int k; /* tell the board code to disable the panel */ @@ -367,10 +455,22 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) board_cfg = &ch->cfg.board_cfg; if (board_cfg->display_off) board_cfg->display_off(board_cfg->board_data); + + /* cleanup deferred io if SYS bus */ + tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; + if (ch->ldmt1r_value & (1 << 12) && tmp) { + fb_deferred_io_cleanup(&ch->info); + ch->info.fbdefio = NULL; + } } /* stop the lcdc */ sh_mobile_lcdc_start_stop(priv, 0); + + /* stop clocks */ + for (k = 0; k < ARRAY_SIZE(priv->ch); k++) + if (priv->ch[k].enabled) + sh_mobile_lcdc_clk_off(priv); } static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) @@ -413,9 +513,13 @@ static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) return -EINVAL; } -static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source, +static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev, + int clock_source, struct sh_mobile_lcdc_priv *priv) { +#ifdef CONFIG_HAVE_CLK + char clk_name[8]; +#endif char *str; int icksel; @@ -430,14 +534,21 @@ static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source, priv->lddckr = icksel << 16; #ifdef CONFIG_HAVE_CLK + atomic_set(&priv->clk_usecnt, -1); + snprintf(clk_name, sizeof(clk_name), "lcdc%d", pdev->id); + priv->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + return PTR_ERR(priv->clk); + } + if (str) { - priv->clk = clk_get(dev, str); - if (IS_ERR(priv->clk)) { - dev_err(dev, "cannot get clock %s\n", str); - return PTR_ERR(priv->clk); + priv->dot_clk = clk_get(&pdev->dev, str); + if (IS_ERR(priv->dot_clk)) { + dev_err(&pdev->dev, "cannot get dot clock %s\n", str); + clk_put(priv->clk); + return PTR_ERR(priv->dot_clk); } - - clk_enable(priv->clk); } #endif @@ -475,11 +586,34 @@ static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { .accel = FB_ACCEL_NONE, }; +static void sh_mobile_lcdc_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + sys_fillrect(info, rect); + sh_mobile_lcdc_deferred_io_touch(info); +} + +static void sh_mobile_lcdc_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + sys_copyarea(info, area); + 
sh_mobile_lcdc_deferred_io_touch(info); +} + +static void sh_mobile_lcdc_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + sys_imageblit(info, image); + sh_mobile_lcdc_deferred_io_touch(info); +} + static struct fb_ops sh_mobile_lcdc_ops = { .fb_setcolreg = sh_mobile_lcdc_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sh_mobile_lcdc_fillrect, + .fb_copyarea = sh_mobile_lcdc_copyarea, + .fb_imageblit = sh_mobile_lcdc_imageblit, }; static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) @@ -540,8 +674,9 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "cannot find IO resource\n"); + i = platform_get_irq(pdev, 0); + if (!res || i < 0) { + dev_err(&pdev->dev, "cannot get platform resources\n"); error = -ENOENT; goto err0; } @@ -553,6 +688,14 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) goto err0; } + error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED, + pdev->dev.bus_id, priv); + if (error) { + dev_err(&pdev->dev, "unable to request irq\n"); + goto err1; + } + + priv->irq = i; platform_set_drvdata(pdev, priv); pdata = pdev->dev.platform_data; @@ -587,8 +730,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } - error = sh_mobile_lcdc_setup_clocks(&pdev->dev, - pdata->clock_source, priv); + error = sh_mobile_lcdc_setup_clocks(pdev, pdata->clock_source, priv); if (error) { dev_err(&pdev->dev, "unable to setup clocks\n"); goto err1; @@ -637,6 +779,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) info->fix.smem_start = priv->ch[i].dma_handle; info->screen_base = buf; info->device = &pdev->dev; + info->par = &priv->ch[i]; } if (error) @@ -664,6 +807,10 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) (int) priv->ch[i].cfg.lcd_cfg.xres, (int) priv->ch[i].cfg.lcd_cfg.yres, priv->ch[i].cfg.bpp); + + /* deferred io mode: disable clock to save power */ + if (info->fbdefio) + sh_mobile_lcdc_clk_off(priv); } return 0; @@ -697,15 +844,16 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) } #ifdef CONFIG_HAVE_CLK - if (priv->clk) { - clk_disable(priv->clk); - clk_put(priv->clk); - } + if (priv->dot_clk) + clk_put(priv->dot_clk); + clk_put(priv->clk); #endif if (priv->base) iounmap(priv->base); + if (priv->irq) + free_irq(priv->irq, priv); kfree(priv); return 0; } diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 0d15b0eaf79..5139c25ca96 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -356,7 +356,9 @@ int w1_reset_select_slave(struct w1_slave *sl) w1_write_8(sl->master, W1_SKIP_ROM); else { u8 match[9] = {W1_MATCH_ROM, }; - memcpy(&match[1], (u8 *)&sl->reg_num, 8); + u64 rn = le64_to_cpu(*((u64*)&sl->reg_num)); + + memcpy(&match[1], &rn, 8); w1_write_block(sl->master, match, 9); } return 0; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 526c191e84e..8dc7109d61b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -44,13 +44,15 @@ #include <linux/list.h> #include <linux/sysdev.h> -#include <asm/xen/hypervisor.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/tlb.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> +#include <xen/interface/xen.h> #include 
<xen/interface/memory.h> #include <xen/xenbus.h> #include <xen/features.h> diff --git a/drivers/xen/features.c b/drivers/xen/features.c index 0707714e40d..99eda169c77 100644 --- a/drivers/xen/features.c +++ b/drivers/xen/features.c @@ -8,7 +8,11 @@ #include <linux/types.h> #include <linux/cache.h> #include <linux/module.h> -#include <asm/xen/hypervisor.h> + +#include <asm/xen/hypercall.h> + +#include <xen/interface/xen.h> +#include <xen/interface/version.h> #include <xen/features.h> u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 06592b9da83..7d8f531fb8e 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -40,6 +40,7 @@ #include <xen/interface/xen.h> #include <xen/page.h> #include <xen/grant_table.h> +#include <asm/xen/hypercall.h> #include <asm/pgtable.h> #include <asm/sync_bitops.h>
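
The m66592-udc and r8a66597-hcd hunks above convert both SuperH USB controllers from hand-rolled MSTPCR register pokes to the generic clock framework, and both follow the same probe-time pattern: look up the clock by a name built from pdev->id, enable it, and unwind with clk_disable()/clk_put() on the error path exactly as remove() does. The following is a minimal sketch of that pattern only; my_dev, my_probe and do_remaining_setup are hypothetical names, not functions from these drivers.

```c
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical private state; the real drivers keep "struct clk *clk"
 * inside struct m66592 / struct r8a66597. */
struct my_dev {
	struct clk *clk;
};

/* Hypothetical stand-in for the remainder of probe (ioremap, request_irq,
 * registering the hcd/udc, ...). */
static int do_remaining_setup(struct platform_device *pdev)
{
	return 0;
}

static int my_probe(struct platform_device *pdev, struct my_dev *d)
{
	char clk_name[8];
	int ret;

	/* The clock name is derived from the platform device id, e.g. "usbf0". */
	snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id);

	d->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(d->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		return PTR_ERR(d->clk);
	}
	clk_enable(d->clk);

	ret = do_remaining_setup(pdev);
	if (ret)
		goto err_clk;
	return 0;

err_clk:
	/* Error unwind mirrors remove(): disable, then drop the reference. */
	clk_disable(d->clk);
	clk_put(d->clk);
	return ret;
}
```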
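The sh_mobile_lcdcfb change gates its clocks with a use counter initialised to -1 (atomic_set(&priv->clk_usecnt, -1)), so that atomic_inc_and_test() fires only for the first user and atomic_sub_return() == -1 only for the last. A short sketch of just that counting trick, under the assumption that a single clock is being shared (my_clk_on/my_clk_off are illustrative names, not the driver's):

```c
#include <linux/clk.h>
#include <asm/atomic.h>

/* Start at -1 so the first enable brings the counter to 0
 * (atomic_inc_and_test() returns true) and the last disable
 * brings it back to -1 (atomic_sub_return() returns -1). */
static atomic_t clk_usecnt = ATOMIC_INIT(-1);

static void my_clk_on(struct clk *clk)
{
	if (atomic_inc_and_test(&clk_usecnt))
		clk_enable(clk);	/* first user: actually turn it on */
}

static void my_clk_off(struct clk *clk)
{
	if (atomic_sub_return(1, &clk_usecnt) == -1)
		clk_disable(clk);	/* last user gone: turn it off */
}
```

This is what lets the driver keep the LCDC clock off between deferred-I/O panel updates while start/stop and the interrupt handler can each take and release the clock independently.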
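The usbtmc hunk adds a single MODULE_DEVICE_TABLE() line; the point is that exporting the id table generates the MODULE_ALIAS entries userspace (udev/modprobe) matches against, so the module is autoloaded when a matching device appears. A minimal sketch of the idiom, using a hypothetical example_ids table with the same class/subclass match as usbtmc:

```c
#include <linux/module.h>
#include <linux/usb.h>

/* Match any USB Test & Measurement Class interface
 * (application-specific class, subclass 3, protocol 0). */
static struct usb_device_id example_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0) },
	{ }			/* terminating entry */
};

/* Emits the modalias information used for module autoloading. */
MODULE_DEVICE_TABLE(usb, example_ids);

MODULE_LICENSE("GPL");
```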