Diffstat (limited to 'drivers')
69 files changed, 1001 insertions, 843 deletions
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c index a474ca2334d..954ac8ce958 100644 --- a/drivers/acpi/dispatcher/dsobject.c +++ b/drivers/acpi/dispatcher/dsobject.c @@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } } + + /* Special object resolution for elements of a package */ + + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == + AML_VAR_PACKAGE_OP)) { + /* + * Attempt to resolve the node to a value before we insert it into + * the package. If this is a reference to a common data type, + * resolve it immediately. According to the ACPI spec, package + * elements can only be "data objects" or method references. + * Attempt to resolve to an Integer, Buffer, String or Package. + * If cannot, return the named reference (for things like Devices, + * Methods, etc.) Buffer Fields and Fields will resolve to simple + * objects (int/buf/str/pkg). + * + * NOTE: References to things like Devices, Methods, Mutexes, etc. + * will remain as named references. This behavior is not described + * in the ACPI spec, but it appears to be an oversight. + */ + obj_desc = (union acpi_operand_object *)op->common.node; + + status = + acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR + (struct + acpi_namespace_node, + &obj_desc), + walk_state); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + switch (op->common.node->type) { + /* + * For these types, we need the actual node, not the subobject. + * However, the subobject got an extra reference count above. + */ + case ACPI_TYPE_MUTEX: + case ACPI_TYPE_METHOD: + case ACPI_TYPE_POWER: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_EVENT: + case ACPI_TYPE_REGION: + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_THERMAL: + + obj_desc = + (union acpi_operand_object *)op->common. + node; + break; + + default: + break; + } + + /* + * If above resolved to an operand object, we are done. Otherwise, + * we have a NS node, we must create the package entry as a named + * reference. + */ + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != + ACPI_DESC_TYPE_NAMED) { + goto exit; + } + } } /* Create and init a new internal ACPI object */ @@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } + exit: *obj_desc_ptr = obj_desc; return_ACPI_STATUS(AE_OK); } @@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, arg = arg->common.next; for (i = 0; arg && (i < element_count); i++) { if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { - - /* This package element is already built, just get it */ - - obj_desc->package.elements[i] = - ACPI_CAST_PTR(union acpi_operand_object, - arg->common.node); + if (arg->common.node->type == ACPI_TYPE_METHOD) { + /* + * A method reference "looks" to the parser to be a method + * invocation, so we special case it here + */ + arg->common.aml_opcode = AML_INT_NAMEPATH_OP; + status = + acpi_ds_build_internal_object(walk_state, + arg, + &obj_desc-> + package. 
+ elements[i]); + } else { + /* This package element is already built, just get it */ + + obj_desc->package.elements[i] = + ACPI_CAST_PTR(union acpi_operand_object, + arg->common.node); + } } else { status = acpi_ds_build_internal_object(walk_state, arg, &obj_desc-> diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index c26c61fb36c..6742d7bc477 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -29,6 +29,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -413,7 +414,7 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr, } else { msr_low = 0; msr_high = 0; - rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, + rdmsr_safe(MSR_IA32_THERM_CONTROL, (u32 *)&msr_low , (u32 *) &msr_high); msr = (msr_high << 32) | msr_low; *value = (acpi_integer) msr; @@ -438,7 +439,7 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value) "HARDWARE addr space,NOT supported yet\n"); } else { msr = value; - wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, + wrmsr_safe(MSR_IA32_THERM_CONTROL, msr & 0xffffffff, msr >> 32); ret = 0; } @@ -572,21 +573,32 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) return -ENODEV; pr->throttling.state = 0; - local_irq_disable(); + value = 0; ret = acpi_read_throttling_status(pr, &value); if (ret >= 0) { state = acpi_get_throttling_state(pr, value); pr->throttling.state = state; } - local_irq_enable(); return 0; } static int acpi_processor_get_throttling(struct acpi_processor *pr) { - return pr->throttling.acpi_processor_get_throttling(pr); + cpumask_t saved_mask; + int ret; + + /* + * Migrate task to the cpu pointed by pr. + */ + saved_mask = current->cpus_allowed; + set_cpus_allowed(current, cpumask_of_cpu(pr->id)); + ret = pr->throttling.acpi_processor_get_throttling(pr); + /* restore the previous state */ + set_cpus_allowed(current, saved_mask); + + return ret; } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) @@ -717,21 +729,29 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, if (state < pr->throttling_platform_limit) return -EPERM; - local_irq_disable(); value = 0; ret = acpi_get_throttling_value(pr, state, &value); if (ret >= 0) { acpi_write_throttling_state(pr, value); pr->throttling.state = state; } - local_irq_enable(); return 0; } int acpi_processor_set_throttling(struct acpi_processor *pr, int state) { - return pr->throttling.acpi_processor_set_throttling(pr, state); + cpumask_t saved_mask; + int ret; + /* + * Migrate task to the cpu pointed by pr. 
+ */ + saved_mask = current->cpus_allowed; + set_cpus_allowed(current, cpumask_of_cpu(pr->id)); + ret = pr->throttling.acpi_processor_set_throttling(pr, state); + /* restore the previous state */ + set_cpus_allowed(current, saved_mask); + return ret; } int acpi_processor_get_throttling_info(struct acpi_processor *pr) diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 483269db2c7..b538e1d22bf 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -967,6 +967,13 @@ static int piix_broken_suspend(void) }, }, { + .ident = "TECRA M3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"), + }, + }, + { .ident = "TECRA M5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), @@ -981,6 +988,20 @@ static int piix_broken_suspend(void) }, }, { + .ident = "TECRA A8", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"), + }, + }, + { + .ident = "Satellite R25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"), + }, + }, + { .ident = "Satellite U200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), @@ -988,6 +1009,13 @@ static int piix_broken_suspend(void) }, }, { + .ident = "Satellite U200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"), + }, + }, + { .ident = "Satellite Pro U200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 33f06277b3b..b514a80f137 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4185,6 +4185,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices which get the IVB wrong */ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, /* End Marker */ { } @@ -6964,12 +6967,11 @@ int ata_host_start(struct ata_host *host) if (ap->ops->port_start) { rc = ap->ops->port_start(ap); if (rc) { - ata_port_printk(ap, KERN_ERR, "failed to " - "start port (errno=%d)\n", rc); + if (rc != -ENODEV) + dev_printk(KERN_ERR, host->dev, "failed to start port %d (errno=%d)\n", i, rc); goto err_out; } } - ata_eh_freeze_port(ap); } diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 0dac69db1fd..e6605f03864 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1850,30 +1850,54 @@ static void ata_eh_link_report(struct ata_link *link) ehc->i.serror & SERR_DEV_XCHG ? 
"DevExch " : ""); for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - static const char *dma_str[] = { - [DMA_BIDIRECTIONAL] = "bidi", - [DMA_TO_DEVICE] = "out", - [DMA_FROM_DEVICE] = "in", - [DMA_NONE] = "", - }; struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; + const u8 *cdb = qc->cdb; + char data_buf[20] = ""; + char cdb_buf[70] = ""; if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || !qc->err_mask) continue; + if (qc->dma_dir != DMA_NONE) { + static const char *dma_str[] = { + [DMA_BIDIRECTIONAL] = "bidi", + [DMA_TO_DEVICE] = "out", + [DMA_FROM_DEVICE] = "in", + }; + static const char *prot_str[] = { + [ATA_PROT_PIO] = "pio", + [ATA_PROT_DMA] = "dma", + [ATA_PROT_NCQ] = "ncq", + [ATA_PROT_ATAPI] = "pio", + [ATA_PROT_ATAPI_DMA] = "dma", + }; + + snprintf(data_buf, sizeof(data_buf), " %s %u %s", + prot_str[qc->tf.protocol], qc->nbytes, + dma_str[qc->dma_dir]); + } + + if (is_atapi_taskfile(&qc->tf)) + snprintf(cdb_buf, sizeof(cdb_buf), + "cdb %02x %02x %02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x %02x %02x\n ", + cdb[0], cdb[1], cdb[2], cdb[3], + cdb[4], cdb[5], cdb[6], cdb[7], + cdb[8], cdb[9], cdb[10], cdb[11], + cdb[12], cdb[13], cdb[14], cdb[15]); + ata_dev_printk(qc->dev, KERN_ERR, "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " - "tag %d cdb 0x%x data %u %s\n " + "tag %d%s\n %s" "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " "Emask 0x%x (%s)%s\n", cmd->command, cmd->feature, cmd->nsect, cmd->lbal, cmd->lbam, cmd->lbah, cmd->hob_feature, cmd->hob_nsect, cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, - cmd->device, qc->tag, qc->cdb[0], qc->nbytes, - dma_str[qc->dma_dir], + cmd->device, qc->tag, data_buf, cdb_buf, res->command, res->feature, res->nsect, res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c index bb250a48e27..67e574de31e 100644 --- a/drivers/ata/pata_at32.c +++ b/drivers/ata/pata_at32.c @@ -28,7 +28,7 @@ #include <asm/arch/smc.h> #define DRV_NAME "pata_at32" -#define DRV_VERSION "0.0.2" +#define DRV_VERSION "0.0.3" /* * CompactFlash controller memory layout relative to the base address: @@ -64,6 +64,8 @@ * Mode 2 | 8.3 | 240 ns | 0x07 * Mode 3 | 11.1 | 180 ns | 0x0f * Mode 4 | 16.7 | 120 ns | 0x1f + * + * Alter PIO_MASK below according to table to set maximal PIO mode. 
*/ #define PIO_MASK (0x1f) @@ -85,36 +87,40 @@ struct at32_ide_info { */ static int pata_at32_setup_timing(struct device *dev, struct at32_ide_info *info, - const struct ata_timing *timing) + const struct ata_timing *ata) { - /* These two values are found through testing */ - const int min_recover = 25; - const int ncs_hold = 15; - struct smc_config *smc = &info->smc; + struct smc_timing timing; int active; int recover; + memset(&timing, 0, sizeof(struct smc_timing)); + /* Total cycle time */ - smc->read_cycle = timing->cyc8b; + timing.read_cycle = ata->cyc8b; /* DIOR <= CFIOR timings */ - smc->nrd_setup = timing->setup; - smc->nrd_pulse = timing->act8b; + timing.nrd_setup = ata->setup; + timing.nrd_pulse = ata->act8b; + timing.nrd_recover = ata->rec8b; + + /* Convert nanosecond timing to clock cycles */ + smc_set_timing(smc, &timing); - /* Compute recover, extend total cycle if needed */ - active = smc->nrd_setup + smc->nrd_pulse; + /* Add one extra cycle setup due to signal ring */ + smc->nrd_setup = smc->nrd_setup + 1; + + active = smc->nrd_setup + smc->nrd_pulse; recover = smc->read_cycle - active; - if (recover < min_recover) { - smc->read_cycle = active + min_recover; - recover = min_recover; - } + /* Need at least two cycles recovery */ + if (recover < 2) + smc->read_cycle = active + 2; /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ - smc->ncs_read_setup = 0; - smc->ncs_read_pulse = active + ncs_hold; + smc->ncs_read_setup = 1; + smc->ncs_read_pulse = smc->read_cycle - 2; /* Write timings same as read timings */ smc->write_cycle = smc->read_cycle; @@ -123,11 +129,13 @@ static int pata_at32_setup_timing(struct device *dev, smc->ncs_write_setup = smc->ncs_read_setup; smc->ncs_write_pulse = smc->ncs_read_pulse; - /* Do some debugging output */ - dev_dbg(dev, "SMC: C=%d S=%d P=%d R=%d NCSS=%d NCSP=%d NCSR=%d\n", + /* Do some debugging output of ATA and SMC timings */ + dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n", + ata->cyc8b, ata->setup, ata->act8b, ata->rec8b); + + dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n", smc->read_cycle, smc->nrd_setup, smc->nrd_pulse, - recover, smc->ncs_read_setup, smc->ncs_read_pulse, - smc->read_cycle - smc->ncs_read_pulse); + smc->ncs_read_setup, smc->ncs_read_pulse); /* Finally, configure the SMC */ return smc_set_configuration(info->cs, smc); @@ -182,7 +190,6 @@ static struct scsi_host_template at32_sht = { }; static struct ata_port_operations at32_port_ops = { - .port_disable = ata_port_disable, .set_piomode = pata_at32_set_piomode, .tf_load = ata_tf_load, .tf_read = ata_tf_read, @@ -203,7 +210,6 @@ static struct ata_port_operations at32_port_ops = { .irq_clear = pata_at32_irq_clear, .irq_on = ata_irq_on, - .irq_ack = ata_irq_ack, .port_start = ata_sff_port_start, }; @@ -223,8 +229,7 @@ static int __init pata_at32_init_one(struct device *dev, /* Setup ATA bindings */ ap->ops = &at32_port_ops; ap->pio_mask = PIO_MASK; - ap->flags = ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS - | ATA_FLAG_PIO_POLLING; + ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS; /* * Since all 8-bit taskfile transfers has to go on the lower @@ -357,12 +362,12 @@ static int __init pata_at32_probe(struct platform_device *pdev) info->smc.tdf_mode = 0; /* TDF optimization disabled */ info->smc.tdf_cycles = 0; /* No TDF wait cycles */ - /* Setup ATA timing */ + /* Setup SMC to ATA timing */ ret = pata_at32_setup_timing(dev, info, &initial_timing); if (ret) goto err_setup_timing; - /* Setup ATA addresses */ + /* Map ATA address space */ ret = -ENOMEM; info->ide_addr = 
devm_ioremap(dev, info->res_ide.start, 16); info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16); @@ -373,7 +378,7 @@ static int __init pata_at32_probe(struct platform_device *pdev) pata_at32_debug_bus(dev, info); #endif - /* Register ATA device */ + /* Setup and register ATA device */ ret = pata_at32_init_one(dev, info); if (ret) goto err_ata_device; diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 81db405a544..088a41f4e65 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1489,6 +1489,8 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev) int board_idx = 0; struct resource *res; struct ata_host *host; + unsigned int fsclk = get_sclk(); + int udma_mode = 5; const struct ata_port_info *ppi[] = { &bfin_port_info[board_idx], NULL }; @@ -1507,6 +1509,11 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev) if (res == NULL) return -EINVAL; + while (bfin_port_info[board_idx].udma_mask>0 && udma_fsclk[udma_mode] > fsclk) { + udma_mode--; + bfin_port_info[board_idx].udma_mask >>= 1; + } + /* * Now that that's out of the way, wire up the port.. */ diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index a43f64d2775..8d864e5e97e 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -164,10 +164,14 @@ enum { MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, - PCI_IRQ_CAUSE_OFS = 0x1d58, - PCI_IRQ_MASK_OFS = 0x1d5c, + PCI_IRQ_CAUSE_OFS = 0x1d58, + PCI_IRQ_MASK_OFS = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ + PCIE_IRQ_CAUSE_OFS = 0x1900, + PCIE_IRQ_MASK_OFS = 0x1910, + PCIE_UNMASK_ALL_IRQS = 0x70a, /* assorted bits */ + HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, HC_MAIN_IRQ_MASK_OFS = 0x1d64, PORT0_ERR = (1 << 0), /* shift by port # */ @@ -303,6 +307,7 @@ enum { MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ + MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? 
*/ @@ -388,7 +393,15 @@ struct mv_port_signal { u32 pre; }; -struct mv_host_priv; +struct mv_host_priv { + u32 hp_flags; + struct mv_port_signal signal[8]; + const struct mv_hw_ops *ops; + u32 irq_cause_ofs; + u32 irq_mask_ofs; + u32 unmask_all_irqs; +}; + struct mv_hw_ops { void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -401,12 +414,6 @@ struct mv_hw_ops { void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); }; -struct mv_host_priv { - u32 hp_flags; - struct mv_port_signal signal[8]; - const struct mv_hw_ops *ops; -}; - static void mv_irq_clear(struct ata_port *ap); static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); @@ -631,11 +638,13 @@ static const struct pci_device_id mv_pci_tbl[] = { /* Adaptec 1430SA */ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, - { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, - - /* add Marvell 7042 support */ + /* Marvell 7042 support */ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, + /* Highpoint RocketRAID PCIe series */ + { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, + { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, + { } /* terminate list */ }; @@ -1648,13 +1657,14 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) static void mv_pci_error(struct ata_host *host, void __iomem *mmio) { + struct mv_host_priv *hpriv = host->private_data; struct ata_port *ap; struct ata_queued_cmd *qc; struct ata_eh_info *ehi; unsigned int i, err_mask, printed = 0; u32 err_cause; - err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); + err_cause = readl(mmio + hpriv->irq_cause_ofs); dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); @@ -1662,7 +1672,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio) DPRINTK("All regs @ PCI error\n"); mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; @@ -1926,6 +1936,8 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) { + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); @@ -1937,8 +1949,8 @@ static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(HC_MAIN_IRQ_MASK_OFS); ZERO(MV_PCI_SERR_MASK); - ZERO(PCI_IRQ_CAUSE_OFS); - ZERO(PCI_IRQ_MASK_OFS); + ZERO(hpriv->irq_cause_ofs); + ZERO(hpriv->irq_mask_ofs); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); @@ -2170,7 +2182,7 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class, mv_scr_read(ap, SCR_ERROR, &serror); mv_scr_read(ap, SCR_CONTROL, &scontrol); DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", status, serror, scontrol); + "SCtrl 0x%08x\n", sstatus, serror, scontrol); } #endif @@ -2490,6 +2502,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; case chip_7042: + hp_flags |= MV_HP_PCIE; case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; @@ -2516,6 +2529,15 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) } hpriv->hp_flags = hp_flags; + if (hp_flags & MV_HP_PCIE) { + hpriv->irq_cause_ofs = 
PCIE_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; + } else { + hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; + } return 0; } @@ -2595,10 +2617,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) } /* Clear any currently outstanding host interrupt conditions */ - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); /* and unmask interrupt generation for host regs */ - writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); + writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); if (IS_GEN_I(hpriv)) writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS); @@ -2609,8 +2631,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) "PCI int cause/mask=0x%08x/0x%08x\n", readl(mmio + HC_MAIN_IRQ_CAUSE_OFS), readl(mmio + HC_MAIN_IRQ_MASK_OFS), - readl(mmio + PCI_IRQ_CAUSE_OFS), - readl(mmio + PCI_IRQ_MASK_OFS)); + readl(mmio + hpriv->irq_cause_ofs), + readl(mmio + hpriv->irq_mask_ofs)); done: return rc; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5fd6688a444..ddd3a259cea 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -12,7 +12,7 @@ if CRYPTO_HW config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" - depends on X86_32 + depends on X86_32 && !UML select CRYPTO_ALGAPI help Some VIA processors come with an integrated crypto engine diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index c6df2925ebd..d6952959d72 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1515,6 +1515,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; @@ -1599,6 +1600,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) @@ -1643,7 +1645,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { - sprintf(bname, "%s%s", + snprintf(bname, sizeof(bname), "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? "," : "\0"); @@ -1673,6 +1675,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e8d69b0adf9..d9107e542df 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -888,7 +888,7 @@ config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 select MII - depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN + depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BLACKFIN help This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. 
Say Y if you want it @@ -926,7 +926,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on ARCH_PXA || SUPERH + depends on ARCH_PXA || SH_MAGIC_PANEL_R2 help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index eebf5bb2b03..e7fdd81919b 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -1340,7 +1340,9 @@ static int amd8111e_close(struct net_device * dev) struct amd8111e_priv *lp = netdev_priv(dev); netif_stop_queue(dev); +#ifdef CONFIG_AMD8111E_NAPI napi_disable(&lp->napi); +#endif spin_lock_irq(&lp->lock); @@ -1372,7 +1374,9 @@ static int amd8111e_open(struct net_device * dev ) dev->name, dev)) return -EAGAIN; +#ifdef CONFIG_AMD8111E_NAPI napi_enable(&lp->napi); +#endif spin_lock_irq(&lp->lock); @@ -1380,7 +1384,9 @@ static int amd8111e_open(struct net_device * dev ) if(amd8111e_restart(dev)){ spin_unlock_irq(&lp->lock); +#ifdef CONFIG_AMD8111E_NAPI napi_disable(&lp->napi); +#endif if (dev->irq) free_irq(dev->irq, dev); return -ENOMEM; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 0b99b554929..eb971755a3f 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -676,7 +676,7 @@ static void bf537mac_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); #if defined(BFIN_MAC_CSUM_OFFLOAD) skb->csum = current_rx_ptr->status.ip_payload_csum; - skb->ip_summed = CHECKSUM_PARTIAL; + skb->ip_summed = CHECKSUM_COMPLETE; #endif netif_rx(skb); diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 2dbf8dc116c..c5975047c89 100644..100755 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c @@ -374,7 +374,9 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "TxInternalMACXmitError", "TxFramesWithExcessiveDeferral", "TxFCSErrors", - + "TxJumboFramesOk", + "TxJumboOctetsOk", + "RxOctetsOK", "RxOctetsBad", "RxUnicastFramesOK", @@ -392,16 +394,17 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "RxInRangeLengthErrors", "RxOutOfRangeLengthField", "RxFrameTooLongErrors", + "RxJumboFramesOk", + "RxJumboOctetsOk", /* Port stats */ - "RxPackets", "RxCsumGood", - "TxPackets", "TxCsumOffload", "TxTso", "RxVlan", "TxVlan", - + "TxNeedHeadroom", + /* Interrupt stats */ "rx drops", "pure_rsps", @@ -463,23 +466,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, const struct cmac_statistics *s; const struct sge_intr_counts *t; struct sge_port_stats ss; - unsigned int len; s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); - - len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK); - memcpy(data, &s->TxOctetsOK, len); - data += len; - - len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK); - memcpy(data, &s->RxOctetsOK, len); - data += len; - + t = t1_sge_get_intr_counts(adapter->sge); t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); - memcpy(data, &ss, sizeof(ss)); - data += sizeof(ss); - t = t1_sge_get_intr_counts(adapter->sge); + *data++ = s->TxOctetsOK; + *data++ = s->TxOctetsBad; + *data++ = s->TxUnicastFramesOK; + *data++ = s->TxMulticastFramesOK; + *data++ = s->TxBroadcastFramesOK; + *data++ = s->TxPauseFrames; + *data++ = s->TxFramesWithDeferredXmissions; + *data++ = s->TxLateCollisions; + *data++ = s->TxTotalCollisions; + *data++ = s->TxFramesAbortedDueToXSCollisions; + *data++ = s->TxUnderrun; + *data++ = s->TxLengthErrors; + *data++ = s->TxInternalMACXmitError; + *data++ = 
s->TxFramesWithExcessiveDeferral; + *data++ = s->TxFCSErrors; + *data++ = s->TxJumboFramesOK; + *data++ = s->TxJumboOctetsOK; + + *data++ = s->RxOctetsOK; + *data++ = s->RxOctetsBad; + *data++ = s->RxUnicastFramesOK; + *data++ = s->RxMulticastFramesOK; + *data++ = s->RxBroadcastFramesOK; + *data++ = s->RxPauseFrames; + *data++ = s->RxFCSErrors; + *data++ = s->RxAlignErrors; + *data++ = s->RxSymbolErrors; + *data++ = s->RxDataErrors; + *data++ = s->RxSequenceErrors; + *data++ = s->RxRuntErrors; + *data++ = s->RxJabberErrors; + *data++ = s->RxInternalMACRcvError; + *data++ = s->RxInRangeLengthErrors; + *data++ = s->RxOutOfRangeLengthField; + *data++ = s->RxFrameTooLongErrors; + *data++ = s->RxJumboFramesOK; + *data++ = s->RxJumboOctetsOK; + + *data++ = ss.rx_cso_good; + *data++ = ss.tx_cso; + *data++ = ss.tx_tso; + *data++ = ss.vlan_xtract; + *data++ = ss.vlan_insert; + *data++ = ss.tx_need_hdrroom; + *data++ = t->rx_drops; *data++ = t->pure_rsps; *data++ = t->unhandled_irqs; diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c index 678778a8d13..2117c4fbb10 100644..100755 --- a/drivers/net/chelsio/pm3393.c +++ b/drivers/net/chelsio/pm3393.c @@ -45,7 +45,7 @@ #include <linux/crc32.h> -#define OFFSET(REG_ADDR) (REG_ADDR << 2) +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */ #define MAX_FRAME_SIZE 9600 @@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, return 0; } -static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val, - int over) -{ - u32 val0, val1, val2; - - t1_tpi_read(adapter, offs, &val0); - t1_tpi_read(adapter, offs + 4, &val1); - t1_tpi_read(adapter, offs + 8, &val2); - - *val &= ~0ull << 40; - *val |= val0 & 0xffff; - *val |= (val1 & 0xffff) << 16; - *val |= (u64)(val2 & 0xff) << 32; - - if (over) - *val += 1ull << 40; +#define RMON_UPDATE(mac, name, stat_name) \ +{ \ + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ + ((u64)(val1 & 0xffff) << 16) | \ + ((u64)(val2 & 0xff) << 32) | \ + ((mac)->stats.stat_name & \ + 0xffffff0000000000ULL); \ + if (ro & \ + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ + (mac)->stats.stat_name += 1ULL << 40; \ } static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, int flag) { - static struct { - unsigned int reg; - unsigned int offset; - } hw_stats [] = { - -#define HW_STAT(name, stat_name) \ - { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } - - /* Rx stats */ - HW_STAT(RxOctetsReceivedOK, RxOctetsOK), - HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK), - HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK), - HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK), - HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames), - HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors), - HW_STAT(RxFramesLostDueToInternalMACErrors, - RxInternalMACRcvError), - HW_STAT(RxSymbolErrors, RxSymbolErrors), - HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors), - HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors), - HW_STAT(RxJabbers, RxJabberErrors), - HW_STAT(RxFragments, RxRuntErrors), - HW_STAT(RxUndersizedFrames, RxRuntErrors), - HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK), - HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK), - - /* Tx stats */ - 
HW_STAT(TxOctetsTransmittedOK, TxOctetsOK), - HW_STAT(TxFramesLostDueToInternalMACTransmissionError, - TxInternalMACXmitError), - HW_STAT(TxTransmitSystemError, TxFCSErrors), - HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK), - HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK), - HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK), - HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames), - HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK), - HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK) - }, *p = hw_stats; - u64 ro; - u32 val0, val1, val2, val3; - u64 *stats = (u64 *) &mac->stats; - unsigned int i; + u64 ro; + u32 val0, val1, val2, val3; /* Snap the counters */ pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, @@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) { - unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW; - - pm3393_rmon_update((mac)->adapter, OFFSET(p->reg), - stats + p->offset, ro & (reg >> 2)); - } - - + /* Rx stats */ + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, + RxInternalMACRcvError); + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); + RMON_UPDATE(mac, RxJabbers, RxJabberErrors); + RMON_UPDATE(mac, RxFragments, RxRuntErrors); + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); + + /* Tx stats */ + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, + TxInternalMACXmitError); + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); return &mac->stats; } diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 443666292a5..b301c0428ae 100644..100755 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port, for_each_possible_cpu(cpu) { struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); - ss->rx_packets += st->rx_packets; ss->rx_cso_good += st->rx_cso_good; - ss->tx_packets += st->tx_packets; ss->tx_cso += st->tx_cso; ss->tx_tso += st->tx_tso; + ss->tx_need_hdrroom += st->tx_need_hdrroom; ss->vlan_xtract += st->vlan_xtract; ss->vlan_insert += st->vlan_insert; } @@ -1380,7 +1379,6 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) __skb_pull(skb, sizeof(*p)); st = 
per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); - st->rx_packets++; skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); skb->dev->last_rx = jiffies; @@ -1624,11 +1622,9 @@ int t1_poll(struct napi_struct *napi, int budget) { struct adapter *adapter = container_of(napi, struct adapter, napi); struct net_device *dev = adapter->port[0].dev; - int work_done; - - work_done = process_responses(adapter, budget); + int work_done = process_responses(adapter, budget); - if (likely(!responses_pending(adapter))) { + if (likely(work_done < budget)) { netif_rx_complete(dev, napi); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); @@ -1848,7 +1844,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct adapter *adapter = dev->priv; struct sge *sge = adapter->sge; - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], + smp_processor_id()); struct cpl_tx_pkt *cpl; struct sk_buff *orig_skb = skb; int ret; @@ -1856,6 +1853,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->protocol == htons(ETH_P_CPL5)) goto send; + /* + * We are using a non-standard hard_header_len. + * Allocate more header room in the rare cases it is not big enough. + */ + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); + ++st->tx_need_hdrroom; + dev_kfree_skb_any(orig_skb); + if (!skb) + return NETDEV_TX_OK; + } + if (skb_shinfo(skb)->gso_size) { int eth_type; struct cpl_tx_pkt_lso *hdr; @@ -1889,24 +1898,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /* - * We are using a non-standard hard_header_len and some kernel - * components, such as pktgen, do not handle it right. - * Complain when this happens but try to fix things up. 
- */ - if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { - pr_debug("%s: headroom %d header_len %d\n", dev->name, - skb_headroom(skb), dev->hard_header_len); - - if (net_ratelimit()) - printk(KERN_ERR "%s: inadequate headroom in " - "Tx packet\n", dev->name); - skb = skb_realloc_headroom(skb, sizeof(*cpl)); - dev_kfree_skb_any(orig_skb); - if (!skb) - return NETDEV_TX_OK; - } - if (!(adapter->flags & UDP_CSUM_CAPABLE) && skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol == IPPROTO_UDP) { @@ -1952,7 +1943,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) cpl->vlan_valid = 0; send: - st->tx_packets++; dev->trans_start = jiffies; ret = t1_sge_tx(skb, adapter, 0, dev); diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index 713d9c55f24..cced9dff91c 100644..100755 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h @@ -57,13 +57,12 @@ struct sge_intr_counts { }; struct sge_port_stats { - u64 rx_packets; /* # of Ethernet packets received */ u64 rx_cso_good; /* # of successful RX csum offloads */ - u64 tx_packets; /* # of TX packets */ u64 tx_cso; /* # of TX checksum offloads */ u64 tx_tso; /* # of TSO requests */ u64 vlan_xtract; /* # of VLAN tag extractions */ u64 vlan_insert; /* # of VLAN tag insertions */ + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */ }; struct sk_buff; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index cf39473ef90..4f37506ad37 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3942,7 +3942,7 @@ e1000_clean(struct napi_struct *napi, int budget) &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ - if ((!tx_cleaned && (work_done < budget)) || + if ((!tx_cleaned && (work_done == 0)) || !netif_running(poll_dev)) { quit_polling: if (likely(adapter->itr_setting & 3)) diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index f78e5bf7cb3..5f82a4647ee 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0080" +#define DRV_VERSION "EHEA_0083" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index f0319f1e8e0..869e1604b16 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -136,7 +136,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) struct ehea_port *port = netdev_priv(dev); struct net_device_stats *stats = &port->stats; struct hcp_ehea_port_cb2 *cb2; - u64 hret, rx_packets; + u64 hret, rx_packets, tx_packets; int i; memset(stats, 0, sizeof(*stats)); @@ -162,7 +162,11 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) for (i = 0; i < port->num_def_qps; i++) rx_packets += port->port_res[i].rx_packets; - stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp; + tx_packets = 0; + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) + tx_packets += port->port_res[i].tx_packets; + + stats->tx_packets = tx_packets; stats->multicast = cb2->rxmcp; stats->rx_errors = cb2->rxuerr; stats->rx_bytes = cb2->rxo; @@ -406,11 +410,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, if (cqe->status & EHEA_CQE_STAT_ERR_CRC) pr->p_stats.err_frame_crc++; - if (netif_msg_rx_err(pr->port)) { - ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); - ehea_dump(cqe, sizeof(*cqe), "CQE"); - } - if (rq == 2) { 
*processed_rq2 += 1; skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); @@ -422,7 +421,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, } if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { - ehea_error("Critical receive error. Resetting port."); + if (netif_msg_rx_err(pr->port)) { + ehea_error("Critical receive error for QP %d. " + "Resetting port.", pr->qp->init_attr.qp_nr); + ehea_dump(cqe, sizeof(*cqe), "CQE"); + } schedule_work(&pr->port->reset_task); return 1; } @@ -2000,6 +2003,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) } ehea_post_swqe(pr->qp, swqe); + pr->tx_packets++; if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { spin_lock_irqsave(&pr->netif_queue, flags); diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index 562de0ebdd8..bc62d389c16 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -145,8 +145,8 @@ struct ehea_rwqe { #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 #define EHEA_CQE_TYPE_RQ 0x60 -#define EHEA_CQE_STAT_ERR_MASK 0x720F -#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F +#define EHEA_CQE_STAT_ERR_MASK 0x700F +#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF #define EHEA_CQE_STAT_ERR_TCP 0x4000 #define EHEA_CQE_STAT_ERR_IP 0x2000 #define EHEA_CQE_STAT_ERR_CRC 0x1000 diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index a8a0ee220da..bf5a7caa5b5 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -971,6 +971,8 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) mpc52xx_fec_reset_stats(ndev); + SET_NETDEV_DEV(ndev, &op->dev); + /* Register the new network device */ rv = register_netdev(ndev); if (rv < 0) diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 92ce2e38f0d..a96583cceb5 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5286,19 +5286,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); - for (i = 0; i < 5000; i++) { - msleep(1); - if (nv_mgmt_acquire_sema(dev)) { - /* management unit setup the phy already? */ - if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == - NVREG_XMITCTL_SYNC_PHY_INIT) { - /* phy is inited by mgmt unit */ - phyinitialized = 1; - dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); - } else { - /* we need to init the phy */ - } - break; + if (nv_mgmt_acquire_sema(dev)) { + /* management unit setup the phy already? 
*/ + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == + NVREG_XMITCTL_SYNC_PHY_INIT) { + /* phy is inited by mgmt unit */ + phyinitialized = 1; + dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); + } else { + /* we need to init the phy */ } } } @@ -5613,6 +5609,22 @@ static struct pci_device_id pci_tbl[] = { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, {0,}, }; diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 0de3aa2a2e4..eb0718b441b 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -642,9 +642,11 @@ static void emac_reset_work(struct work_struct *work) DBG(dev, "reset_work" NL); mutex_lock(&dev->link_lock); - emac_netif_stop(dev); - emac_full_tx_reset(dev); - emac_netif_start(dev); + if (dev->opened) { + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + } mutex_unlock(&dev->link_lock); } @@ -1063,10 +1065,9 @@ static int emac_open(struct net_device *ndev) dev->rx_sg_skb = NULL; mutex_lock(&dev->link_lock); + dev->opened = 1; - /* XXX Start PHY polling now. Shouldn't wr do like sungem instead and - * always poll the PHY even when the iface is down ? That would allow - * things like laptop-net to work. --BenH + /* Start PHY polling now. 
*/ if (dev->phy.address >= 0) { int link_poll_interval; @@ -1145,9 +1146,11 @@ static void emac_link_timer(struct work_struct *work) int link_poll_interval; mutex_lock(&dev->link_lock); - DBG2(dev, "link timer" NL); + if (!dev->opened) + goto bail; + if (dev->phy.def->ops->poll_link(&dev->phy)) { if (!netif_carrier_ok(dev->ndev)) { /* Get new link parameters */ @@ -1170,13 +1173,14 @@ static void emac_link_timer(struct work_struct *work) link_poll_interval = PHY_POLL_LINK_OFF; } schedule_delayed_work(&dev->link_work, link_poll_interval); - + bail: mutex_unlock(&dev->link_lock); } static void emac_force_link_update(struct emac_instance *dev) { netif_carrier_off(dev->ndev); + smp_rmb(); if (dev->link_polling) { cancel_rearming_delayed_work(&dev->link_work); if (dev->link_polling) @@ -1191,11 +1195,14 @@ static int emac_close(struct net_device *ndev) DBG(dev, "close" NL); - if (dev->phy.address >= 0) + if (dev->phy.address >= 0) { + dev->link_polling = 0; cancel_rearming_delayed_work(&dev->link_work); - + } + mutex_lock(&dev->link_lock); emac_netif_stop(dev); - flush_scheduled_work(); + dev->opened = 0; + mutex_unlock(&dev->link_lock); emac_rx_disable(dev); emac_tx_disable(dev); @@ -2756,6 +2763,8 @@ static int __devexit emac_remove(struct of_device *ofdev) unregister_netdev(dev->ndev); + flush_scheduled_work(); + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) tah_detach(dev->tah_dev, dev->tah_port); if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h index 4011803117c..a010b2463fd 100644 --- a/drivers/net/ibm_newemac/core.h +++ b/drivers/net/ibm_newemac/core.h @@ -258,6 +258,7 @@ struct emac_instance { int stop_timeout; /* in us */ int no_mcast; int mcast_pending; + int opened; struct work_struct reset_work; spinlock_t lock; }; diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 9a855e51214..b59f442bbf3 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -176,8 +176,8 @@ struct i596_reg { struct i596_tbd { unsigned short size; unsigned short pad; - dma_addr_t next; - dma_addr_t data; + u32 next; + u32 data; u32 cache_pad[5]; /* Total 32 bytes... 
*/ }; @@ -195,12 +195,12 @@ struct i596_cmd { struct i596_cmd *v_next; /* Address from CPUs viewpoint */ unsigned short status; unsigned short command; - dma_addr_t b_next; /* Address from i596 viewpoint */ + u32 b_next; /* Address from i596 viewpoint */ }; struct tx_cmd { struct i596_cmd cmd; - dma_addr_t tbd; + u32 tbd; unsigned short size; unsigned short pad; struct sk_buff *skb; /* So we can free it after tx */ @@ -237,8 +237,8 @@ struct cf_cmd { struct i596_rfd { unsigned short stat; unsigned short cmd; - dma_addr_t b_next; /* Address from i596 viewpoint */ - dma_addr_t rbd; + u32 b_next; /* Address from i596 viewpoint */ + u32 rbd; unsigned short count; unsigned short size; struct i596_rfd *v_next; /* Address from CPUs viewpoint */ @@ -249,18 +249,18 @@ struct i596_rfd { }; struct i596_rbd { - /* hardware data */ - unsigned short count; - unsigned short zero1; - dma_addr_t b_next; - dma_addr_t b_data; /* Address from i596 viewpoint */ - unsigned short size; - unsigned short zero2; - /* driver data */ - struct sk_buff *skb; - struct i596_rbd *v_next; - dma_addr_t b_addr; /* This rbd addr from i596 view */ - unsigned char *v_data; /* Address from CPUs viewpoint */ + /* hardware data */ + unsigned short count; + unsigned short zero1; + u32 b_next; + u32 b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + /* driver data */ + struct sk_buff *skb; + struct i596_rbd *v_next; + u32 b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ /* Total 32 bytes... */ #ifdef __LP64__ u32 cache_pad[4]; @@ -275,8 +275,8 @@ struct i596_rbd { struct i596_scb { unsigned short status; unsigned short command; - dma_addr_t cmd; - dma_addr_t rfd; + u32 cmd; + u32 rfd; u32 crc_err; u32 align_err; u32 resource_err; @@ -288,14 +288,14 @@ struct i596_scb { }; struct i596_iscp { - u32 stat; - dma_addr_t scb; + u32 stat; + u32 scb; }; struct i596_scp { - u32 sysbus; - u32 pad; - dma_addr_t iscp; + u32 sysbus; + u32 pad; + u32 iscp; }; struct i596_dma { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 035fd41fb61..f0574073a2a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -143,21 +143,29 @@ static int m88e1111_config_init(struct phy_device *phydev) int err; if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)) { + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || + (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { int temp; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; + temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); + if (temp < 0) + return temp; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); - - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); - if (err < 0) - return err; + } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + temp &= ~MII_M1111_TX_DELAY; + temp |= MII_M1111_RX_DELAY; + } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + temp &= ~MII_M1111_RX_DELAY; + temp |= MII_M1111_TX_DELAY; } + err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + if (err < 0) + return err; + temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); if (temp < 0) return temp; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 9bc11773705..7c9e6e34950 100644 --- a/drivers/net/phy/phy.c 
+++ b/drivers/net/phy/phy.c @@ -406,6 +406,9 @@ int phy_mii_ioctl(struct phy_device *phydev, && phydev->drv->config_init) phydev->drv->config_init(phydev); break; + + default: + return -ENOTTY; } return 0; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 63266670624..d5113dd712c 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -1081,7 +1081,7 @@ static int init_nic(struct s2io_nic *nic) /* to set the swapper controle on the card */ if(s2io_set_swapper(nic)) { DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n"); - return -1; + return -EIO; } /* @@ -1503,7 +1503,7 @@ static int init_nic(struct s2io_nic *nic) DBG_PRINT(ERR_DBG, "%s: failed rts ds steering", dev->name); DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i); - return FAILURE; + return -ENODEV; } } @@ -1570,7 +1570,7 @@ static int init_nic(struct s2io_nic *nic) if (time > 10) { DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n", dev->name); - return -1; + return -ENODEV; } msleep(50); time++; @@ -1623,7 +1623,7 @@ static int init_nic(struct s2io_nic *nic) if (time > 10) { DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", dev->name); - return -1; + return -ENODEV; } time++; msleep(50); @@ -3914,6 +3914,12 @@ static int s2io_close(struct net_device *dev) { struct s2io_nic *sp = dev->priv; + /* Return if the device is already closed * + * Can happen when s2io_card_up failed in change_mtu * + */ + if (!is_s2io_card_up(sp)) + return 0; + netif_stop_queue(dev); napi_disable(&sp->napi); /* Reset card, kill tasklet and free Tx and Rx buffers. */ @@ -6355,6 +6361,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int s2io_change_mtu(struct net_device *dev, int new_mtu) { struct s2io_nic *sp = dev->priv; + int ret = 0; if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", @@ -6366,9 +6373,11 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { s2io_card_down(sp); netif_stop_queue(dev); - if (s2io_card_up(sp)) { + ret = s2io_card_up(sp); + if (ret) { DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", __FUNCTION__); + return ret; } if (netif_queue_stopped(dev)) netif_wake_queue(dev); @@ -6379,7 +6388,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); } - return 0; + return ret; } /** @@ -6777,6 +6786,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) unsigned long flags; register u64 val64 = 0; + if (!is_s2io_card_up(sp)) + return; + del_timer_sync(&sp->alarm_timer); /* If s2io_set_link task is executing, wait till it completes. 
*/ while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) { @@ -6850,11 +6862,13 @@ static int s2io_card_up(struct s2io_nic * sp) u16 interruptible; /* Initialize the H/W I/O registers */ - if (init_nic(sp) != 0) { + ret = init_nic(sp); + if (ret != 0) { DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", dev->name); - s2io_reset(sp); - return -ENODEV; + if (ret != -EIO) + s2io_reset(sp); + return ret; } /* diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 6d62250fba0..186eb8ebfda 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -44,7 +44,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.12" +#define DRV_VERSION "1.13" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -1095,16 +1095,9 @@ static void xm_link_down(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); - xm_write16(hw, port, XM_MMU_CMD, cmd); - - /* dummy read to ensure writing */ - xm_read16(hw, port, XM_MMU_CMD); - if (netif_carrier_ok(dev)) skge_link_down(skge); } @@ -1194,6 +1187,7 @@ static void genesis_init(struct skge_hw *hw) static void genesis_reset(struct skge_hw *hw, int port) { const u8 zero[8] = { 0 }; + u32 reg; skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); @@ -1209,6 +1203,11 @@ static void genesis_reset(struct skge_hw *hw, int port) xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); } @@ -1634,15 +1633,14 @@ static void genesis_mac_init(struct skge_hw *hw, int port) } xm_write16(hw, port, XM_RX_CMD, r); - /* We want short frames padded to 60 bytes. */ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); - /* - * Bump up the transmit threshold. This helps hold off transmit - * underruns when we're blasting traffic from both ports at once. - */ - xm_write16(hw, port, XM_TX_THR, 512); + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); /* * Enable the reception of all error frames. This is is @@ -1713,7 +1711,13 @@ static void genesis_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; - u32 reg; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); genesis_reset(hw, port); @@ -1721,20 +1725,17 @@ static void genesis_stop(struct skge_port *skge) skge_write16(hw, B3_PA_CTRL, port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); - /* - * If the transfer sticks at the MAC the STOP command will not - * terminate if we don't flush the XMAC's transmit FIFO ! 
- */ - xm_write32(hw, port, XM_MODE, - xm_read32(hw, port, XM_MODE)|XM_MD_FTF); - - /* Reset the MAC */ - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); /* For external PHYs there must be special handling */ if (hw->phy_type != SK_PHY_XMAC) { - reg = skge_read32(hw, B2_GP_IO); + u32 reg = skge_read32(hw, B2_GP_IO); if (port == 0) { reg |= GP_DIR_0; reg &= ~GP_IO_0; @@ -1801,11 +1802,6 @@ static void genesis_mac_intr(struct skge_hw *hw, int port) xm_write32(hw, port, XM_MODE, XM_MD_FTF); ++dev->stats.tx_fifo_errors; } - - if (status & XM_IS_RXF_OV) { - xm_write32(hw, port, XM_MODE, XM_MD_FRF); - ++dev->stats.rx_fifo_errors; - } } static void genesis_link_up(struct skge_port *skge) @@ -1862,9 +1858,9 @@ static void genesis_link_up(struct skge_port *skge) xm_write32(hw, port, XM_MODE, mode); - /* Turn on detection of Tx underrun, Rx overrun */ + /* Turn on detection of Tx underrun */ msk = xm_read16(hw, port, XM_IMSK); - msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR); + msk &= ~XM_IS_TXF_UR; xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); @@ -2194,9 +2190,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port) TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); - /* serial mode register */ - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (hw->dev[port]->mtu > 1500) + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, reg); @@ -2619,8 +2618,8 @@ static int skge_up(struct net_device *dev) yukon_mac_init(hw, port); spin_unlock_bh(&hw->phy_lock); - /* Configure RAMbuffers */ - chunk = hw->ram_size / ((hw->ports + 1)*2); + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); ram_addr = hw->ram_offset + 2 * chunk * port; skge_ramset(hw, rxqaddr[port], ram_addr, chunk); @@ -2897,11 +2896,7 @@ static void skge_tx_timeout(struct net_device *dev) static int skge_change_mtu(struct net_device *dev, int new_mtu) { - struct skge_port *skge = netdev_priv(dev); - struct skge_hw *hw = skge->hw; - int port = skge->port; int err; - u16 ctl, reg; if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; @@ -2911,40 +2906,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) return 0; } - skge_write32(hw, B0_IMSK, 0); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_stop_queue(dev); - napi_disable(&skge->napi); - - ctl = gma_read16(hw, port, GM_GP_CTRL); - gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); - - skge_rx_clean(skge); - skge_rx_stop(hw, port); + skge_down(dev); dev->mtu = new_mtu; - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (new_mtu > 1500) - reg |= GM_SMOD_JUMBO_ENA; - gma_write16(hw, port, GM_SERIAL_MODE, reg); - - skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); - - err = skge_rx_fill(dev); - wmb(); - if (!err) - skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); - skge_write32(hw, B0_IMSK, hw->intr_mask); - + err = skge_up(dev); if (err) dev_close(dev); - else { - gma_write16(hw, port, GM_GP_CTRL, ctl); - - napi_enable(&skge->napi); - 
netif_wake_queue(dev); - } return err; } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a2070db725c..3d1dfc94840 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -31,7 +31,6 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> -#include <linux/aer.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/tcp.h> @@ -240,22 +239,21 @@ static void sky2_power_on(struct sky2_hw *hw) sky2_write8(hw, B2_Y2_CLK_GATE, 0); if (hw->flags & SKY2_HW_ADV_POWER_CTL) { - struct pci_dev *pdev = hw->pdev; u32 reg; - pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + sky2_pci_write32(hw, PCI_DEV_REG3, 0); - pci_read_config_dword(pdev, PCI_DEV_REG4, ®); + reg = sky2_pci_read32(hw, PCI_DEV_REG4); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; - pci_write_config_dword(pdev, PCI_DEV_REG4, reg); + sky2_pci_write32(hw, PCI_DEV_REG4, reg); - pci_read_config_dword(pdev, PCI_DEV_REG5, ®); + reg = sky2_pci_read32(hw, PCI_DEV_REG5); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; - pci_write_config_dword(pdev, PCI_DEV_REG5, reg); + sky2_pci_write32(hw, PCI_DEV_REG5, reg); - pci_write_config_dword(pdev, PCI_CFG_REG_1, 0); + sky2_pci_write32(hw, PCI_CFG_REG_1, 0); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); @@ -619,12 +617,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) { - struct pci_dev *pdev = hw->pdev; u32 reg1; static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; - pci_read_config_dword(pdev, PCI_DEV_REG1, ®1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); /* Turn on/off phy power saving */ if (onoff) reg1 &= ~phy_power[port]; @@ -634,8 +631,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) reg1 |= coma_mode[port]; - pci_write_config_dword(pdev, PCI_DEV_REG1, reg1); - pci_read_config_dword(pdev, PCI_DEV_REG1, ®1); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); udelay(100); } @@ -704,9 +701,9 @@ static void sky2_wol_init(struct sky2_port *sky2) sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Turn on legacy PCI-Express PME mode */ - pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 |= PCI_Y2_PME_LEGACY; - pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); @@ -848,6 +845,13 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) sky2_set_tx_stfwd(hw, port); } + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* disable dynamic watermark */ + reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); + reg &= ~TX_DYN_WM_ENA; + sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); + } } /* Assign Ram Buffer allocation to queue */ @@ -1320,15 +1324,12 @@ static int sky2_up(struct net_device *dev) */ if (otherdev && netif_running(otherdev) && (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { - struct sky2_port *osky2 = netdev_priv(otherdev); u16 cmd; - pci_read_config_word(hw->pdev, cap + PCI_X_CMD, &cmd); + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); cmd &= ~PCI_X_CMD_MAX_SPLIT; - pci_write_config_word(hw->pdev, cap + PCI_X_CMD, cmd); + 
sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); - sky2->rx_csum = 0; - osky2->rx_csum = 0; } if (netif_msg_ifup(sky2)) @@ -2426,37 +2427,26 @@ static void sky2_hw_intr(struct sky2_hw *hw) if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; - pci_read_config_word(pdev, PCI_STATUS, &pci_err); + pci_err = sky2_pci_read16(hw, PCI_STATUS); if (net_ratelimit()) dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); - pci_write_config_word(pdev, PCI_STATUS, + sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable Error occurred */ - int aer = pci_find_aer_capability(hw->pdev); u32 err; - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, - &err); - pci_cleanup_aer_uncorrect_error_status(pdev); - } else { - /* Either AER not configured, or not working - * because of bad MMCONFIG, so just do recover - * manually. - */ - err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); - sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, - 0xfffffffful); - } - + err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); if (net_ratelimit()) dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); } if (status & Y2_HWE_L1_MASK) @@ -2703,13 +2693,10 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) static int __devinit sky2_init(struct sky2_hw *hw) { - int rc; u8 t8; /* Enable all clocks and check for bad PCI access */ - rc = pci_write_config_dword(hw->pdev, PCI_DEV_REG3, 0); - if (rc) - return rc; + sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_write8(hw, B0_CTST, CS_RST_CLR); @@ -2806,32 +2793,21 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); /* clear PCI errors, if any */ - pci_read_config_word(pdev, PCI_STATUS, &status); + status = sky2_pci_read16(hw, PCI_STATUS); status |= PCI_STATUS_ERROR_BITS; - pci_write_config_word(pdev, PCI_STATUS, status); + sky2_pci_write16(hw, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (cap) { - if (pci_find_aer_capability(pdev)) { - /* Check for advanced error reporting */ - pci_cleanup_aer_uncorrect_error_status(pdev); - pci_cleanup_aer_correct_error_status(pdev); - } else { - dev_warn(&pdev->dev, - "PCI Express Advanced Error Reporting" - " not configured or MMCONFIG problem?\n"); - - sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, - 0xfffffffful); - } + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); /* If error bit is stuck on ignore it */ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) dev_info(&pdev->dev, "ignoring stuck error report bit\n"); - - else if (pci_enable_pcie_error_reporting(pdev)) + else hwe_mask |= Y2_IS_PCI_EXP; } @@ -3672,32 +3648,33 @@ static int sky2_set_tso(struct net_device *dev, u32 data) static int sky2_get_eeprom_len(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; u16 reg2; - pci_read_config_word(sky2->hw->pdev, PCI_DEV_REG2, ®2); + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static u32 sky2_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) { u32 val; - pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); do { - pci_read_config_word(pdev, cap 
+ PCI_VPD_ADDR, &offset); + offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); } while (!(offset & PCI_VPD_ADDR_F)); - pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); return val; } -static void sky2_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) { - pci_write_config_word(pdev, cap + PCI_VPD_DATA, val); - pci_write_config_dword(pdev, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + sky2_pci_write16(hw, cap + PCI_VPD_DATA, val); + sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); do { - pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); } while (offset & PCI_VPD_ADDR_F); } @@ -3715,7 +3692,7 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom eeprom->magic = SKY2_EEPROM_MAGIC; while (length > 0) { - u32 val = sky2_vpd_read(sky2->hw->pdev, cap, offset); + u32 val = sky2_vpd_read(sky2->hw, cap, offset); int n = min_t(int, length, sizeof(val)); memcpy(data, &val, n); @@ -3745,10 +3722,10 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom int n = min_t(int, length, sizeof(val)); if (n < sizeof(val)) - val = sky2_vpd_read(sky2->hw->pdev, cap, offset); + val = sky2_vpd_read(sky2->hw, cap, offset); memcpy(&val, data, n); - sky2_vpd_write(sky2->hw->pdev, cap, offset, val); + sky2_vpd_write(sky2->hw, cap, offset, val); length -= n; data += n; @@ -4013,7 +3990,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->duplex = -1; sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); - sky2->rx_csum = 1; + sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); sky2->wol = wol; spin_lock_init(&sky2->phy_lock); @@ -4184,9 +4161,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev, */ { u32 reg; - pci_read_config_dword(pdev,PCI_DEV_REG2, ®); + reg = sky2_pci_read32(hw, PCI_DEV_REG2); reg &= ~PCI_REV_DESC; - pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + sky2_pci_write32(hw, PCI_DEV_REG2, reg); } #endif @@ -4377,7 +4354,7 @@ static int sky2_resume(struct pci_dev *pdev) if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_FE_P) - pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_reset(hw); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 69525fd7908..bc646a47edd 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2128,4 +2128,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); } + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} #endif diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index dd18af0ce67..1a3d80bfe9e 100644 --- a/drivers/net/smc911x.c +++ 
b/drivers/net/smc911x.c @@ -428,7 +428,6 @@ static inline void smc911x_drop_pkt(struct net_device *dev) */ static inline void smc911x_rcv(struct net_device *dev) { - struct smc911x_local *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; unsigned int pkt_len, status; struct sk_buff *skb; @@ -473,6 +472,7 @@ static inline void smc911x_rcv(struct net_device *dev) skb_put(skb,pkt_len-4); #ifdef SMC_USE_DMA { + struct smc911x_local *lp = netdev_priv(dev); unsigned int fifo; /* Lower the FIFO threshold if possible */ fifo = SMC_GET_FIFO_INT(); @@ -1379,13 +1379,6 @@ static void smc911x_set_multicast_list(struct net_device *dev) unsigned int multicast_table[2]; unsigned int mcr, update_multicast = 0; unsigned long flags; - /* table for flipping the order of 5 bits */ - static const unsigned char invert5[] = - {0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C, - 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E, - 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D, - 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F}; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); @@ -1432,7 +1425,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) cur_addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) { - int position; + u32 position; /* do we have a pointer here? */ if (!cur_addr) @@ -1442,12 +1435,10 @@ static void smc911x_set_multicast_list(struct net_device *dev) if (!(*cur_addr->dmi_addr & 1)) continue; - /* only use the low order bits */ - position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; + /* upper 6 bits are used as hash index */ + position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; - /* do some messy swapping to put the bit in the right spot */ - multicast_table[invert5[position&0x1F]&0x1] |= - (1<<invert5[(position>>1)&0x1F]); + multicast_table[position>>5] |= 1 << (position&0x1f); } /* be sure I get rid of flags I might have set */ diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h index 16a0edc078f..d04e4fa3520 100644 --- a/drivers/net/smc911x.h +++ b/drivers/net/smc911x.h @@ -37,7 +37,7 @@ #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING -#elif CONFIG_SH_MAGIC_PANEL_R2 +#elif defined(CONFIG_SH_MAGIC_PANEL_R2) #define SMC_USE_SH_DMA 0 #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index db34e1eb67e..07b7f7120e3 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -55,7 +55,7 @@ #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#elif defined(CONFIG_BFIN) +#elif defined(CONFIG_BLACKFIN) #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH #define RPC_LSA_DEFAULT RPC_LED_100_10 diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index f6fedcc32de..68872142530 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -2281,14 +2281,12 @@ static void gem_reset_task(struct work_struct *work) mutex_lock(&gp->pm_mutex); - napi_disable(&gp->napi); + if (gp->opened) + napi_disable(&gp->napi); spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); - if (gp->running == 0) - goto not_running; - if (gp->running) { netif_stop_queue(gp->dev); @@ -2298,13 +2296,14 @@ static void gem_reset_task(struct work_struct *work) gem_set_link_modes(gp); netif_wake_queue(gp->dev); } - not_running: + gp->reset_task_pending = 0; spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - napi_enable(&gp->napi); + if (gp->opened) + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); } diff 
--git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index ca90566d5bc..b4891caeae5 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -2118,8 +2118,8 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) pci_enable_wake(pci_dev, PCI_D3cold, 1); /* Power down device*/ - pci_set_power_state(pci_dev, pci_choose_state (pci_dev,state)); pci_save_state(pci_dev); + pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); return 0; } @@ -2129,8 +2129,8 @@ static int dmfe_resume(struct pci_dev *pci_dev) struct net_device *dev = pci_get_drvdata(pci_dev); u32 tmp; - pci_restore_state(pci_dev); pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); /* Re-initilize DM910X board */ dmfe_init_dm910x(dev); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index a3ff270593f..7f689907ac2 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1460,6 +1460,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { upsmr |= UPSMR_RPM; switch (ugeth->max_speed) { @@ -1557,6 +1559,8 @@ static void adjust_link(struct net_device *dev) if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { if (phydev->speed == SPEED_10) upsmr |= UPSMR_R10M; @@ -3795,6 +3799,10 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type) return PHY_INTERFACE_MODE_RGMII; if (strcasecmp(phy_connection_type, "rgmii-id") == 0) return PHY_INTERFACE_MODE_RGMII_ID; + if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) + return PHY_INTERFACE_MODE_RGMII_TXID; + if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) + return PHY_INTERFACE_MODE_RGMII_RXID; if (strcasecmp(phy_connection_type, "rtbi") == 0) return PHY_INTERFACE_MODE_RTBI; @@ -3889,6 +3897,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_TBI: case PHY_INTERFACE_MODE_RTBI: max_speed = SPEED_1000; diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 2c685734b7a..1ffdd106f4c 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -94,7 +94,7 @@ static void dm_write_async_callback(struct urb *urb) struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; if (urb->status < 0) - printk(KERN_DEBUG "dm_write_async_callback() failed with %d", + printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n", urb->status); kfree(req); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 450e29d7a9f..35cd65d6b9e 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1242,6 +1242,9 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { int ret; + int mtu = 
vptr->dev->mtu; + + vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; vptr->rd_info = kcalloc(vptr->options.numrx, sizeof(struct velocity_rd_info), GFP_KERNEL); @@ -1898,8 +1901,6 @@ static int velocity_open(struct net_device *dev) struct velocity_info *vptr = netdev_priv(dev); int ret; - vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32); - ret = velocity_init_rings(vptr); if (ret < 0) goto out; @@ -1978,12 +1979,6 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) velocity_free_rd_ring(vptr); dev->mtu = new_mtu; - if (new_mtu > 8192) - vptr->rx_buf_sz = 9 * 1024; - else if (new_mtu > 4096) - vptr->rx_buf_sz = 8192; - else - vptr->rx_buf_sz = 4 * 1024; ret = velocity_init_rd_ring(vptr); if (ret < 0) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 2b17c1dc46f..b45eecc53c4 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1566,7 +1566,7 @@ static void b43_release_firmware(struct b43_wldev *dev) static void b43_print_fw_helptext(struct b43_wl *wl) { b43err(wl, "You must go to " - "http://linuxwireless.org/en/users/Drivers/bcm43xx#devicefirmware " + "http://linuxwireless.org/en/users/Drivers/b43#devicefirmware " "and download the correct firmware (version 4).\n"); } diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c index 3d4ed647c31..7ff091e69f0 100644 --- a/drivers/net/wireless/b43/phy.c +++ b/drivers/net/wireless/b43/phy.c @@ -2214,7 +2214,7 @@ int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev) } dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - b43err(dev->wl, "Could not allocate memory" + b43err(dev->wl, "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index 8cb3dc4c474..83161d9af81 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -996,7 +996,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev) err = ssb_dma_set_mask(dev->dev, dmamask); if (err) { -#ifdef BCM43XX_PIO +#ifdef CONFIG_B43LEGACY_PIO b43legacywarn(dev->wl, "DMA for this device not supported. 
" "Falling back to PIO\n"); dev->__using_pio = 1; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 3bde1e9ab42..32d5e1785bd 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -1419,7 +1419,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev) static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) { b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" - "Drivers/bcm43xx#devicefirmware " + "Drivers/b43#devicefirmware " "and download the correct firmware (version 3).\n"); } diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c index 22a4b3d0186..491e518e4ae 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/b43legacy/phy.c @@ -2020,7 +2020,7 @@ int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev) phy->idle_tssi = 62; dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - b43legacyerr(dev->wl, "Could not allocate memory" + b43legacyerr(dev->wl, "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index b37f1e34870..af3de334365 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c @@ -2149,7 +2149,7 @@ int bcm43xx_phy_init_tssi2dbm_table(struct bcm43xx_private *bcm) } dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - printk(KERN_ERR PFX "Could not allocate memory" + printk(KERN_ERR PFX "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 465da4f67ce..4bdf237f6ad 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -2915,6 +2915,10 @@ static void iwl_set_rate(struct iwl_priv *priv) int i; hw = iwl_get_hw_mode(priv, priv->phymode); + if (!hw) { + IWL_ERROR("Failed to set rate: unable to get hw mode\n"); + return; + } priv->active_rate = 0; priv->active_rate_basic = 0; @@ -6936,13 +6940,10 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, DECLARE_MAC_BUF(mac); IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); - if (conf->mac_addr) - IWL_DEBUG_MAC80211("enter: MAC %s\n", - print_mac(mac, conf->mac_addr)); if (priv->interface_id) { IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); - return 0; + return -EOPNOTSUPP; } spin_lock_irqsave(&priv->lock, flags); @@ -6951,6 +6952,12 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, spin_unlock_irqrestore(&priv->lock, flags); mutex_lock(&priv->mutex); + + if (conf->mac_addr) { + IWL_DEBUG_MAC80211("Set: %s\n", print_mac(mac, conf->mac_addr)); + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + } + iwl_set_mode(priv, conf->type); IWL_DEBUG_MAC80211("leave\n"); @@ -8270,6 +8277,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) { iwl_hw_cancel_deferred_work(priv); + cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->scan_check); cancel_delayed_work(&priv->alive_start); cancel_delayed_work(&priv->post_associate); diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index 9918780f5e8..8f85564ec6f 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c @@ -3003,6 +3003,10 @@ static 
void iwl_set_rate(struct iwl_priv *priv) int i; hw = iwl_get_hw_mode(priv, priv->phymode); + if (!hw) { + IWL_ERROR("Failed to set rate: unable to get hw mode\n"); + return; + } priv->active_rate = 0; priv->active_rate_basic = 0; @@ -7326,9 +7330,6 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, DECLARE_MAC_BUF(mac); IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); - if (conf->mac_addr) - IWL_DEBUG_MAC80211("enter: MAC %s\n", - print_mac(mac, conf->mac_addr)); if (priv->interface_id) { IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); @@ -7341,6 +7342,11 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, spin_unlock_irqrestore(&priv->lock, flags); mutex_lock(&priv->mutex); + + if (conf->mac_addr) { + IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr)); + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + } iwl_set_mode(priv, conf->type); IWL_DEBUG_MAC80211("leave\n"); @@ -8864,6 +8870,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) { iwl_hw_cancel_deferred_work(priv); + cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->scan_check); cancel_delayed_work(&priv->alive_start); cancel_delayed_work(&priv->post_associate); diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ec89dabc412..ba4fc2b3bf0 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -170,7 +170,8 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r #define IF_CS_H_IC_TX_OVER 0x0001 #define IF_CS_H_IC_RX_OVER 0x0002 #define IF_CS_H_IC_DNLD_OVER 0x0004 -#define IF_CS_H_IC_HOST_EVENT 0x0008 +#define IF_CS_H_IC_POWER_DOWN 0x0008 +#define IF_CS_H_IC_HOST_EVENT 0x0010 #define IF_CS_H_IC_MASK 0x001f #define IF_CS_H_INT_MASK 0x00000004 diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 5ead08312e1..1823b48a8ba 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -1165,8 +1165,6 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) #ifdef WIRELESS_EXT dev->wireless_handlers = (struct iw_handler_def *)&libertas_handler_def; #endif -#define NETIF_F_DYNALLOC 16 - dev->features |= NETIF_F_DYNALLOC; dev->flags |= IFF_BROADCAST | IFF_MULTICAST; dev->set_multicast_list = libertas_set_multicast_list; @@ -1348,8 +1346,6 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) #ifdef WIRELESS_EXT mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def; #endif -#define NETIF_F_DYNALLOC 16 - /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); if (ret) { diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index c6f5aa3cb46..395b7882d4d 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -1528,7 +1528,7 @@ static int wlan_set_encodeext(struct net_device *dev, && (ext->key_len != KEY_LEN_WPA_TKIP)) || ((alg == IW_ENCODE_ALG_CCMP) && (ext->key_len != KEY_LEN_WPA_AES))) { - lbs_deb_wext("invalid size %d for key of alg" + lbs_deb_wext("invalid size %d for key of alg " "type %d\n", ext->key_len, alg); diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index 2402cb8dd32..d2fa079fbc4 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c @@ -806,7 +806,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { for (i = 0; i < 6; i++) 
dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i); - printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx" + printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx, " "id %c%c, hw_addr %s\n", dev->name, dev->base_addr, dev->irq, (u_long) ramBase, diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c index 755482a5a93..60d286eb0b8 100644 --- a/drivers/net/wireless/p54usb.c +++ b/drivers/net/wireless/p54usb.c @@ -308,7 +308,7 @@ static int p54u_read_eeprom(struct ieee80211_hw *dev) buf = kmalloc(0x2020, GFP_KERNEL); if (!buf) { - printk(KERN_ERR "prism54usb: cannot allocate memory for" + printk(KERN_ERR "prism54usb: cannot allocate memory for " "eeprom readback!\n"); return -ENOMEM; } diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 6bf3ebbe985..b3b6f654365 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -2782,35 +2782,14 @@ ctc_probe_device(struct ccwgroup_device *cgdev) } /** - * Initialize everything of the net device except the name and the - * channel structs. + * Device setup function called by alloc_netdev(). + * + * @param dev Device to be setup. */ -static struct net_device * -ctc_init_netdevice(struct net_device * dev, int alloc_device, - struct ctc_priv *privptr) +void ctc_init_netdevice(struct net_device * dev) { - if (!privptr) - return NULL; - DBF_TEXT(setup, 3, __FUNCTION__); - if (alloc_device) { - dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); - if (!dev) - return NULL; - } - - dev->priv = privptr; - privptr->fsm = init_fsm("ctcdev", dev_state_names, - dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS, - dev_fsm, DEV_FSM_LEN, GFP_KERNEL); - if (privptr->fsm == NULL) { - if (alloc_device) - kfree(dev); - return NULL; - } - fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); - fsm_settimer(privptr->fsm, &privptr->restart_timer); if (dev->mtu == 0) dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; dev->hard_start_xmit = ctc_tx; @@ -2823,7 +2802,7 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device, dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; dev->flags = IFF_POINTOPOINT | IFF_NOARP; - return dev; + SET_MODULE_OWNER(dev); } @@ -2879,14 +2858,22 @@ ctc_new_device(struct ccwgroup_device *cgdev) "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret); } - dev = ctc_init_netdevice(NULL, 1, privptr); - + dev = alloc_netdev(0, "ctc%d", ctc_init_netdevice); if (!dev) { ctc_pr_warn("ctc_init_netdevice failed\n"); goto out; } + dev->priv = privptr; - strlcpy(dev->name, "ctc%d", IFNAMSIZ); + privptr->fsm = init_fsm("ctcdev", dev_state_names, + dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS, + dev_fsm, DEV_FSM_LEN, GFP_KERNEL); + if (privptr->fsm == NULL) { + free_netdev(dev); + goto out; + } + fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + fsm_settimer(privptr->fsm, &privptr->restart_timer); for (direction = READ; direction <= WRITE; direction++) { privptr->channel[direction] = diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 5552b755c08..07fa824d179 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -977,7 +977,9 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_text_event(adapter->erp_dbf, 2, "a_adis"); debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); - zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); + erp_action->status |= ZFCP_STATUS_ERP_DISMISSED; + if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) + 
zfcp_erp_action_ready(erp_action); } int @@ -1063,7 +1065,7 @@ zfcp_erp_thread(void *data) &adapter->status)) { write_lock_irqsave(&adapter->erp_lock, flags); - next = adapter->erp_ready_head.prev; + next = adapter->erp_ready_head.next; write_unlock_irqrestore(&adapter->erp_lock, flags); if (next != &adapter->erp_ready_head) { @@ -1153,15 +1155,13 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) /* * check for dismissed status again to avoid follow-up actions, - * failing of targets and so on for dismissed actions + * failing of targets and so on for dismissed actions, + * we go through down() here because there has been an up() */ - retval = zfcp_erp_strategy_check_action(erp_action, retval); + if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) + retval = ZFCP_ERP_CONTINUES; switch (retval) { - case ZFCP_ERP_DISMISSED: - /* leave since this action has ridden to its ancestors */ - debug_text_event(adapter->erp_dbf, 6, "a_st_dis2"); - goto unlock; case ZFCP_ERP_NOMEM: /* no memory to continue immediately, let it sleep */ if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) { @@ -3089,7 +3089,7 @@ zfcp_erp_action_enqueue(int action, ++adapter->erp_total_count; /* finally put it into 'ready' queue and kick erp thread */ - list_add(&erp_action->list, &adapter->erp_ready_head); + list_add_tail(&erp_action->list, &adapter->erp_ready_head); up(&adapter->erp_ready_sem); retval = 0; out: diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 240a0bb8986..abce48ccc85 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1339,10 +1339,10 @@ int aac_check_health(struct aac_dev * aac) aif = (struct aac_aifcmd *)hw_fib->data; aif->command = cpu_to_le32(AifCmdEventNotify); aif->seqnum = cpu_to_le32(0xFFFFFFFF); - aif->data[0] = cpu_to_le32(AifEnExpEvent); - aif->data[1] = cpu_to_le32(AifExeFirmwarePanic); - aif->data[2] = cpu_to_le32(AifHighPriority); - aif->data[3] = cpu_to_le32(BlinkLED); + aif->data[0] = AifEnExpEvent; + aif->data[1] = AifExeFirmwarePanic; + aif->data[2] = AifHighPriority; + aif->data[3] = BlinkLED; /* * Put the FIB onto the diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 038980be763..9dd331bc29b 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -636,7 +636,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file) static int aac_cfg_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); } @@ -691,7 +691,7 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg); } @@ -950,7 +950,8 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { - kthread_stop(aac->thread); + if (aac->aif_thread) + kthread_stop(aac->thread); aac_send_shutdown(aac); aac_adapter_disable_int(aac); free_irq(aac->pdev->irq, aac); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 6f8403b82ba..f5732d8f67f 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -393,7 +393,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) #endif /* REAL_DMA */ - 
NCR5380_intr(0, 0); + NCR5380_intr(irq, dummy); #if 0 /* To be sure the int is not masked */ @@ -458,7 +458,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) #endif /* REAL_DMA */ - NCR5380_intr(0, 0); + NCR5380_intr(irq, dummy); return IRQ_HANDLED; } @@ -684,7 +684,7 @@ int atari_scsi_detect(struct scsi_host_template *host) * interrupt after having cleared the pending flag for the DMA * interrupt. */ if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW, - "SCSI NCR5380", scsi_tt_intr)) { + "SCSI NCR5380", instance)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); @@ -701,7 +701,7 @@ int atari_scsi_detect(struct scsi_host_template *host) IRQ_TYPE_PRIO, "Hades DMA emulator", hades_dma_emulator)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2); - free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); + free_irq(IRQ_TT_MFP_SCSI, instance); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; @@ -761,7 +761,7 @@ int atari_scsi_detect(struct scsi_host_template *host) int atari_scsi_release(struct Scsi_Host *sh) { if (IS_A_TT()) - free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); + free_irq(IRQ_TT_MFP_SCSI, sh); if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return 1; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 2596165096d..c2677ba29c7 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -277,7 +277,8 @@ found: /* With interrupts enabled, it will sometimes hang when doing heavy * reads. So better not enable them until I finger it out. */ if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, "dtc", instance)) { + if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, + "dtc", instance)) { printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } @@ -459,7 +460,7 @@ static int dtc_release(struct Scsi_Host *shost) NCR5380_local_declare(); NCR5380_setup(shost); if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 607336f56d5..75585a52c88 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -460,7 +460,8 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) instance->irq = NCR5380_probe_irq(instance, 0xffff); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, generic_NCR5380_intr, IRQF_DISABLED, "NCR5380", instance)) { + if (request_irq(instance->irq, generic_NCR5380_intr, + IRQF_DISABLED, "NCR5380", instance)) { printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } @@ -513,7 +514,7 @@ int generic_NCR5380_release_resources(struct Scsi_Host *instance) NCR5380_setup(instance); if (instance->irq != SCSI_IRQ_NONE) - free_irq(instance->irq, NULL); + free_irq(instance->irq, instance); NCR5380_exit(instance); #ifndef CONFIG_SCSI_G_NCR5380_MEM diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 4bcf916c21a..57ce2251abc 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -197,7 +197,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (unlikely(!sc)) 
return; - tcp_ctask->xmstate = XMSTATE_IDLE; + tcp_ctask->xmstate = XMSTATE_VALUE_IDLE; tcp_ctask->r2t = NULL; } @@ -409,7 +409,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_datasn = r2tsn + 1; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT; + set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); list_move_tail(&ctask->running, &conn->xmitqueue); scsi_queue_work(session->host, &conn->xmitwork); @@ -1254,7 +1254,7 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); - tcp_ctask->xmstate |= XMSTATE_W_PAD; + set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); } /** @@ -1269,7 +1269,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); - tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT; + tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT; } /** @@ -1283,10 +1283,10 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) * xmit. * * Management xmit state machine consists of these states: - * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header - * XMSTATE_IMM_HDR - PDU Header xmit in progress - * XMSTATE_IMM_DATA - PDU Data xmit in progress - * XMSTATE_IDLE - management PDU is done + * XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header + * XMSTATE_BIT_IMM_HDR - PDU Header xmit in progress + * XMSTATE_BIT_IMM_DATA - PDU Data xmit in progress + * XMSTATE_VALUE_IDLE - management PDU is done **/ static int iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) @@ -1297,12 +1297,12 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", conn->id, tcp_mtask->xmstate, mtask->itt); - if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) { + if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) { iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); if (mtask->data_count) { - tcp_mtask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, mtask->data_count); @@ -1315,21 +1315,20 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) (u8*)tcp_mtask->hdrext); tcp_mtask->sent = 0; - tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT; - tcp_mtask->xmstate |= XMSTATE_IMM_HDR; + clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate); + set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); } - if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { + if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) { rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, mtask->data_count); if (rc) return rc; - tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; + clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); } - if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { + if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) { BUG_ON(!mtask->data_count); - tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA; /* FIXME: implement. * Virtual buffer could be spreaded across multiple pages... 
*/ @@ -1339,13 +1338,13 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf, &mtask->data_count, &tcp_mtask->sent); if (rc) { - tcp_mtask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); return rc; } } while (mtask->data_count); } - BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); + BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE); if (mtask->hdr->itt == RESERVED_ITT) { struct iscsi_session *session = conn->session; @@ -1365,7 +1364,7 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; int rc = 0; - if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) { + if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) { tcp_ctask->sent = 0; tcp_ctask->sg_count = 0; tcp_ctask->exp_datasn = 0; @@ -1390,21 +1389,21 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)tcp_ctask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT; - tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT; + clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); } - if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) { + if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) { rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT; + clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); if (sc->sc_data_direction != DMA_TO_DEVICE) return 0; if (ctask->imm_count) { - tcp_ctask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); iscsi_set_padding(tcp_ctask, ctask->imm_count); if (ctask->conn->datadgst_en) { @@ -1414,9 +1413,10 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } } - if (ctask->unsol_count) - tcp_ctask->xmstate |= - XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; + if (ctask->unsol_count) { + set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); + } } return rc; } @@ -1428,25 +1428,25 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int sent = 0, rc; - if (tcp_ctask->xmstate & XMSTATE_W_PAD) { + if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) { iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, tcp_ctask->pad_count); if (conn->datadgst_en) crypto_hash_update(&tcp_conn->tx_hash, &tcp_ctask->sendbuf.sg, tcp_ctask->sendbuf.sg.length); - } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) + } else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate)) return 0; - tcp_ctask->xmstate &= ~XMSTATE_W_PAD; - tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD; + clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); + clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); debug_scsi("sending %d pad bytes for itt 0x%x\n", tcp_ctask->pad_count, ctask->itt); rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, &sent); if (rc) { debug_scsi("padding send failed %d\n", rc); - tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD; + set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); } return rc; } @@ -1465,11 +1465,11 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, tcp_ctask = ctask->dd_data; tcp_conn = conn->dd_data; - if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { + if 
(!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) { crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest); iscsi_buf_init_iov(buf, (char*)digest, 4); } - tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; + clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); if (!rc) @@ -1478,7 +1478,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, else { debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", *digest, ctask->itt); - tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST; + set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); } return rc; } @@ -1526,8 +1526,8 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_data_task *dtask; int rc; - tcp_ctask->xmstate |= XMSTATE_UNS_DATA; - if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { + set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); + if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) { dtask = &tcp_ctask->unsol_dtask; iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); @@ -1537,14 +1537,14 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)dtask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; + clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); iscsi_set_padding(tcp_ctask, ctask->data_count); } rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); if (rc) { - tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; - tcp_ctask->xmstate |= XMSTATE_UNS_HDR; + clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); return rc; } @@ -1565,16 +1565,15 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; int rc; - if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { + if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) { BUG_ON(!ctask->unsol_count); - tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; send_hdr: rc = iscsi_send_unsol_hdr(conn, ctask); if (rc) return rc; } - if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { + if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) { struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; int start = tcp_ctask->sent; @@ -1584,14 +1583,14 @@ send_hdr: ctask->unsol_count -= tcp_ctask->sent - start; if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; + clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); /* * Done with the Data-Out. Next, check if we need * to send another unsolicited Data-Out. 
*/ if (ctask->unsol_count) { debug_scsi("sending more uns\n"); - tcp_ctask->xmstate |= XMSTATE_UNS_INIT; + set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); goto send_hdr; } } @@ -1607,7 +1606,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, struct iscsi_data_task *dtask; int left, rc; - if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) { + if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) { if (!tcp_ctask->r2t) { spin_lock_bh(&session->lock); __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, @@ -1621,19 +1620,19 @@ send_hdr: if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &r2t->headbuf, (u8*)dtask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT; - tcp_ctask->xmstate |= XMSTATE_SOL_HDR; + clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); } - if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { + if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) { r2t = tcp_ctask->r2t; dtask = &r2t->dtask; rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; - tcp_ctask->xmstate |= XMSTATE_SOL_DATA; + clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); if (conn->datadgst_en) { iscsi_data_digest_init(conn->dd_data, tcp_ctask); @@ -1646,7 +1645,7 @@ send_hdr: r2t->sent); } - if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { + if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) { r2t = tcp_ctask->r2t; dtask = &r2t->dtask; @@ -1655,7 +1654,7 @@ send_hdr: &dtask->digestbuf, &dtask->digest); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; + clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); /* * Done with this Data-Out. Next, check if we have @@ -1700,32 +1699,32 @@ send_hdr: * xmit stages. * *iscsi_send_cmd_hdr() - * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate - * Header Digest - * XMSTATE_CMD_HDR_XMIT - Transmit header in progress + * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate + * Header Digest + * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress * *iscsi_send_padding - * XMSTATE_W_PAD - Prepare and send pading - * XMSTATE_W_RESEND_PAD - retry send pading + * XMSTATE_BIT_W_PAD - Prepare and send pading + * XMSTATE_BIT_W_RESEND_PAD - retry send pading * *iscsi_send_digest - * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest - * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest + * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest + * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest * *iscsi_send_unsol_hdr - * XMSTATE_UNS_INIT - prepare un-solicit data header and digest - * XMSTATE_UNS_HDR - send un-solicit header + * XMSTATE_BIT_UNS_INIT - prepare un-solicit data header and digest + * XMSTATE_BIT_UNS_HDR - send un-solicit header * *iscsi_send_unsol_pdu - * XMSTATE_UNS_DATA - send un-solicit data in progress + * XMSTATE_BIT_UNS_DATA - send un-solicit data in progress * *iscsi_send_sol_pdu - * XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize - * XMSTATE_SOL_HDR - send solicit header - * XMSTATE_SOL_DATA - send solicit data + * XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize + * XMSTATE_BIT_SOL_HDR - send solicit header + * XMSTATE_BIT_SOL_DATA - send solicit data * *iscsi_tcp_ctask_xmit - * XMSTATE_IMM_DATA - xmit managment data (??) + * XMSTATE_BIT_IMM_DATA - xmit managment data (??) 
**/ static int iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) @@ -1742,13 +1741,13 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (ctask->sc->sc_data_direction != DMA_TO_DEVICE) return 0; - if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { + if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) { rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, &tcp_ctask->sent, &ctask->imm_count, &tcp_ctask->immbuf, &tcp_ctask->immdigest); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; + clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); } rc = iscsi_send_unsol_pdu(conn, ctask); @@ -1981,7 +1980,7 @@ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) { struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; - tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT; + tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT; } static int diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 7eba44df0a7..68c36cc8997 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -32,21 +32,21 @@ #define IN_PROGRESS_PAD_RECV 0x4 /* xmit state machine */ -#define XMSTATE_IDLE 0x0 -#define XMSTATE_CMD_HDR_INIT 0x1 -#define XMSTATE_CMD_HDR_XMIT 0x2 -#define XMSTATE_IMM_HDR 0x4 -#define XMSTATE_IMM_DATA 0x8 -#define XMSTATE_UNS_INIT 0x10 -#define XMSTATE_UNS_HDR 0x20 -#define XMSTATE_UNS_DATA 0x40 -#define XMSTATE_SOL_HDR 0x80 -#define XMSTATE_SOL_DATA 0x100 -#define XMSTATE_W_PAD 0x200 -#define XMSTATE_W_RESEND_PAD 0x400 -#define XMSTATE_W_RESEND_DATA_DIGEST 0x800 -#define XMSTATE_IMM_HDR_INIT 0x1000 -#define XMSTATE_SOL_HDR_INIT 0x2000 +#define XMSTATE_VALUE_IDLE 0 +#define XMSTATE_BIT_CMD_HDR_INIT 0 +#define XMSTATE_BIT_CMD_HDR_XMIT 1 +#define XMSTATE_BIT_IMM_HDR 2 +#define XMSTATE_BIT_IMM_DATA 3 +#define XMSTATE_BIT_UNS_INIT 4 +#define XMSTATE_BIT_UNS_HDR 5 +#define XMSTATE_BIT_UNS_DATA 6 +#define XMSTATE_BIT_SOL_HDR 7 +#define XMSTATE_BIT_SOL_DATA 8 +#define XMSTATE_BIT_W_PAD 9 +#define XMSTATE_BIT_W_RESEND_PAD 10 +#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11 +#define XMSTATE_BIT_IMM_HDR_INIT 12 +#define XMSTATE_BIT_SOL_HDR_INIT 13 #define ISCSI_PAD_LEN 4 #define ISCSI_SG_TABLESIZE SG_ALL @@ -122,7 +122,7 @@ struct iscsi_data_task { struct iscsi_tcp_mgmt_task { struct iscsi_hdr hdr; char hdrext[sizeof(__u32)]; /* Header-Digest */ - int xmstate; /* mgmt xmit progress */ + unsigned long xmstate; /* mgmt xmit progress */ struct iscsi_buf headbuf; /* header buffer */ struct iscsi_buf sendbuf; /* in progress buffer */ int sent; @@ -150,7 +150,7 @@ struct iscsi_tcp_cmd_task { int pad_count; /* padded bytes */ struct iscsi_buf headbuf; /* header buf (xmit) */ struct iscsi_buf sendbuf; /* in progress buffer*/ - int xmstate; /* xmit xtate machine */ + unsigned long xmstate; /* xmit xtate machine */ int sent; struct scatterlist *sg; /* per-cmd SG list */ struct scatterlist *bad_sg; /* assert statement */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index efceed451b4..8b57af5baae 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -291,9 +291,6 @@ invalid_datalen: min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); } - if (sc->sc_data_direction == DMA_TO_DEVICE) - goto out; - if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { int res_count = be32_to_cpu(rhdr->residual_count); diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index abe2bda6ac3..3b09ab21d70 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -303,7 +303,7 @@ 
int macscsi_detect(struct scsi_host_template * tpnt) if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW, - "ncr5380", instance)) { + "ncr5380", instance)) { printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -326,7 +326,7 @@ int macscsi_detect(struct scsi_host_template * tpnt) int macscsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NCR5380_intr); + free_irq(shpnt->irq, shpnt); NCR5380_exit(shpnt); return 0; diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index ee596565997..f2018b46f49 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -453,7 +453,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt) instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) { + if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, + "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -604,7 +605,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src static int pas16_release(struct Scsi_Host *shost) { if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 3aeb68bcb7a..146d540f628 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -1310,14 +1310,7 @@ qla1280_done(struct scsi_qla_host *ha) } /* Release memory used for this I/O */ - if (cmd->use_sg) { - pci_unmap_sg(ha->pdev, cmd->request_buffer, - cmd->use_sg, cmd->sc_data_direction); - } else if (cmd->request_bufflen) { - pci_unmap_single(ha->pdev, sp->saved_dma_handle, - cmd->request_bufflen, - cmd->sc_data_direction); - } + scsi_dma_unmap(cmd); /* Call the mid-level driver interrupt handler */ CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; @@ -1406,14 +1399,14 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp) break; case CS_DATA_UNDERRUN: - if ((cp->request_bufflen - residual_length) < + if ((scsi_bufflen(cp) - residual_length) < cp->underflow) { printk(KERN_WARNING "scsi: Underflow detected - retrying " "command.\n"); host_status = DID_ERROR; } else { - cp->resid = residual_length; + scsi_set_resid(cp, residual_length); host_status = DID_OK; } break; @@ -2775,33 +2768,28 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) struct device_reg __iomem *reg = ha->iobase; struct scsi_cmnd *cmd = sp->cmd; cmd_a64_entry_t *pkt; - struct scatterlist *sg = NULL, *s; __le32 *dword_ptr; dma_addr_t dma_handle; int status = 0; int cnt; int req_cnt; - u16 seg_cnt; + int seg_cnt; u8 dir; ENTER("qla1280_64bit_start_scsi:"); /* Calculate number of entries and segments required. */ req_cnt = 1; - if (cmd->use_sg) { - sg = (struct scatterlist *) cmd->request_buffer; - seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, - cmd->sc_data_direction); - + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt > 0) { if (seg_cnt > 2) { req_cnt += (seg_cnt - 2) / 5; if ((seg_cnt - 2) % 5) req_cnt++; } - } else if (cmd->request_bufflen) { /* If data transfer. 
*/ - seg_cnt = 1; - } else { - seg_cnt = 0; + } else if (seg_cnt < 0) { + status = 1; + goto out; } if ((req_cnt + 2) >= ha->req_q_cnt) { @@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) * Load data segments. */ if (seg_cnt) { /* If data transfer. */ + struct scatterlist *sg, *s; int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + /* Setup packet address segment pointer. */ dword_ptr = (u32 *)&pkt->dseg_0_address; - if (cmd->use_sg) { /* If scatter gather */ - /* Load command entry data segments. */ - for_each_sg(sg, s, seg_cnt, cnt) { - if (cnt == 2) + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 2) + break; + + dma_handle = sg_dma_address(s); +#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) + if (ha->flags.use_pci_vchannel) + sn_pci_set_vchan(ha->pdev, + (unsigned long *)&dma_handle, + SCSI_BUS_32(cmd)); +#endif + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32(dma_handle)); + *dword_ptr++ = + cpu_to_le32(pci_dma_hi32(dma_handle)); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", + cpu_to_le32(pci_dma_hi32(dma_handle)), + cpu_to_le32(pci_dma_lo32(dma_handle)), + cpu_to_le32(sg_dma_len(sg_next(s)))); + remseg--; + } + dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " + "command packet data - b %i, t %i, l %i \n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), + SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); + + /* + * Build continuation packets. + */ + dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " + "remains\n", seg_cnt); + + while (remseg > 0) { + /* Update sg start */ + sg = s; + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_a64_entry *) pkt)->entry_type = + CONTINUE_A64_TYPE; + ((struct cont_a64_entry *) pkt)->entry_count = 1; + ((struct cont_a64_entry *) pkt)->sys_define = + (uint8_t)ha->req_ring_index; + /* Setup packet address segment pointer. */ + dword_ptr = + (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 5) break; dma_handle = sg_dma_address(s); #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) if (ha->flags.use_pci_vchannel) sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, + (unsigned long *)&dma_handle, SCSI_BUS_32(cmd)); #endif *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", cpu_to_le32(pci_dma_hi32(dma_handle)), cpu_to_le32(pci_dma_lo32(dma_handle)), - cpu_to_le32(sg_dma_len(sg_next(s)))); - remseg--; + cpu_to_le32(sg_dma_len(s))); } - dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " - "command packet data - b %i, t %i, l %i \n", - SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), - SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); - - /* - * Build continuation packets. 
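The segment-loading loops above (and their 32-bit counterparts further down) all split the mapped scatterlist the same way: a first chunk fits in the command entry itself, and the remainder is emitted in fixed-size chunks, one continuation entry per chunk. A condensed sketch of that walk using the 64-bit entry sizes (two segments in the command entry, five per continuation); demo_fill_sg() is illustrative only, and pr_debug() stands in for the driver's dprintk()/register writes:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void demo_fill_sg(struct scatterlist *sglist, int seg_cnt)
{
	struct scatterlist *sg = sglist, *s;
	int remseg = seg_cnt, cnt;

	/* the first two segments go into the command entry */
	for_each_sg(sg, s, seg_cnt, cnt) {
		if (cnt == 2)
			break;
		pr_debug("cmd entry seg: addr=%llx len=%u\n",
			 (unsigned long long)sg_dma_address(s),
			 sg_dma_len(s));
		remseg--;
	}

	/* everything else goes into continuation entries, five per entry */
	while (remseg > 0) {
		sg = s;		/* resume where the previous loop stopped */
		for_each_sg(sg, s, remseg, cnt) {
			if (cnt == 5)
				break;
			pr_debug("cont entry seg: addr=%llx len=%u\n",
				 (unsigned long long)sg_dma_address(s),
				 sg_dma_len(s));
		}
		remseg -= cnt;
	}
}

The "sg = s" assignment is what lets each inner for_each_sg() resume from the first segment the previous loop did not consume.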
- */ - dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " - "remains\n", seg_cnt); - - while (remseg > 0) { - /* Update sg start */ - sg = s; - /* Adjust ring index. */ - ha->req_ring_index++; - if (ha->req_ring_index == REQUEST_ENTRY_CNT) { - ha->req_ring_index = 0; - ha->request_ring_ptr = - ha->request_ring; - } else - ha->request_ring_ptr++; - - pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; - - /* Zero out packet. */ - memset(pkt, 0, REQUEST_ENTRY_SIZE); - - /* Load packet defaults. */ - ((struct cont_a64_entry *) pkt)->entry_type = - CONTINUE_A64_TYPE; - ((struct cont_a64_entry *) pkt)->entry_count = 1; - ((struct cont_a64_entry *) pkt)->sys_define = - (uint8_t)ha->req_ring_index; - /* Setup packet address segment pointer. */ - dword_ptr = - (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; - - /* Load continuation entry data segments. */ - for_each_sg(sg, s, remseg, cnt) { - if (cnt == 5) - break; - dma_handle = sg_dma_address(s); -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif - *dword_ptr++ = - cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr++ = - cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr++ = - cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", - cpu_to_le32(pci_dma_hi32(dma_handle)), - cpu_to_le32(pci_dma_lo32(dma_handle)), - cpu_to_le32(sg_dma_len(s))); - } - remseg -= cnt; - dprintk(5, "qla1280_64bit_start_scsi: " - "continuation packet data - b %i, t " - "%i, l %i \n", SCSI_BUS_32(cmd), - SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); - } - } else { /* No scatter gather data transfer */ - dma_handle = pci_map_single(ha->pdev, - cmd->request_buffer, - cmd->request_bufflen, - cmd->sc_data_direction); - - sp->saved_dma_handle = dma_handle; -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif - *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr = cpu_to_le32(cmd->request_bufflen); - - dprintk(5, "qla1280_64bit_start_scsi: No scatter/" - "gather command packet data - b %i, t %i, " - "l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), - SCSI_LUN_32(cmd)); + remseg -= cnt; + dprintk(5, "qla1280_64bit_start_scsi: " + "continuation packet data - b %i, t " + "%i, l %i \n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); } @@ -3068,12 +3036,11 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) struct device_reg __iomem *reg = ha->iobase; struct scsi_cmnd *cmd = sp->cmd; struct cmd_entry *pkt; - struct scatterlist *sg = NULL, *s; __le32 *dword_ptr; int status = 0; int cnt; int req_cnt; - uint16_t seg_cnt; + int seg_cnt; dma_addr_t dma_handle; u8 dir; @@ -3083,18 +3050,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) cmd->cmnd[0]); /* Calculate number of entries and segments required. */ - req_cnt = 1; - if (cmd->use_sg) { - /* - * We must build an SG list in adapter format, as the kernel's - * SG list cannot be used directly because of data field size - * (__alpha__) differences and the kernel SG list uses virtual - * addresses where we need physical addresses. 
- */ - sg = (struct scatterlist *) cmd->request_buffer; - seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, - cmd->sc_data_direction); - + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt) { /* * if greater than four sg entries then we need to allocate * continuation entries @@ -3106,14 +3063,9 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) } dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", cmd, seg_cnt, req_cnt); - } else if (cmd->request_bufflen) { /* If data transfer. */ - dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n", - SCSI_TCN_32(cmd), cmd, cmd->request_bufflen, - cmd->cmnd[0]); - seg_cnt = 1; - } else { - /* dprintk(1, "No data transfer \n"); */ - seg_cnt = 0; + } else if (seg_cnt < 0) { + status = 1; + goto out; } if ((req_cnt + 2) >= ha->req_q_cnt) { @@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) * Load data segments. */ if (seg_cnt) { + struct scatterlist *sg, *s; int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + /* Setup packet address segment pointer. */ dword_ptr = &pkt->dseg_0_address; - if (cmd->use_sg) { /* If scatter gather */ - dprintk(3, "Building S/G data segments..\n"); - qla1280_dump_buffer(1, (char *)sg, 4 * 16); + dprintk(3, "Building S/G data segments..\n"); + qla1280_dump_buffer(1, (char *)sg, 4 * 16); + + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 4) + break; + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", + (pci_dma_lo32(sg_dma_address(s))), + (sg_dma_len(s))); + remseg--; + } + /* + * Build continuation packets. + */ + dprintk(3, "S/G Building Continuation" + "...seg_cnt=0x%x remains\n", seg_cnt); + while (remseg > 0) { + /* Continue from end point */ + sg = s; + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (struct cmd_entry *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_entry *) pkt)-> + entry_type = CONTINUE_TYPE; + ((struct cont_entry *) pkt)->entry_count = 1; - /* Load command entry data segments. */ - for_each_sg(sg, s, seg_cnt, cnt) { - if (cnt == 4) + ((struct cont_entry *) pkt)->sys_define = + (uint8_t) ha->req_ring_index; + + /* Setup packet address segment pointer. */ + dword_ptr = + &((struct cont_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 7) break; *dword_ptr++ = cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); - *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", - (pci_dma_lo32(sg_dma_address(s))), - (sg_dma_len(s))); - remseg--; - } - /* - * Build continuation packets. - */ - dprintk(3, "S/G Building Continuation" - "...seg_cnt=0x%x remains\n", seg_cnt); - while (remseg > 0) { - /* Continue from end point */ - sg = s; - /* Adjust ring index. */ - ha->req_ring_index++; - if (ha->req_ring_index == REQUEST_ENTRY_CNT) { - ha->req_ring_index = 0; - ha->request_ring_ptr = - ha->request_ring; - } else - ha->request_ring_ptr++; - - pkt = (struct cmd_entry *)ha->request_ring_ptr; - - /* Zero out packet. */ - memset(pkt, 0, REQUEST_ENTRY_SIZE); - - /* Load packet defaults. 
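Both start_scsi variants advance the request ring identically before building each continuation entry; the pattern is easier to see pulled out on its own. A sketch under the assumption that the driver's struct scsi_qla_host fields and REQUEST_ENTRY_* constants from qla1280.h are in scope; demo_next_request_entry() itself is not a driver function:

#include <linux/string.h>
#include "qla1280.h"	/* struct scsi_qla_host, REQUEST_ENTRY_CNT/SIZE */

static void *demo_next_request_entry(struct scsi_qla_host *ha)
{
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		/* wrap back to the start of the ring */
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* each continuation entry starts out zeroed */
	memset(ha->request_ring_ptr, 0, REQUEST_ENTRY_SIZE);
	return ha->request_ring_ptr;
}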
*/ - ((struct cont_entry *) pkt)-> - entry_type = CONTINUE_TYPE; - ((struct cont_entry *) pkt)->entry_count = 1; - - ((struct cont_entry *) pkt)->sys_define = - (uint8_t) ha->req_ring_index; - - /* Setup packet address segment pointer. */ - dword_ptr = - &((struct cont_entry *) pkt)->dseg_0_address; - - /* Load continuation entry data segments. */ - for_each_sg(sg, s, remseg, cnt) { - if (cnt == 7) - break; - *dword_ptr++ = - cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); - *dword_ptr++ = - cpu_to_le32(sg_dma_len(s)); - dprintk(1, - "S/G Segment Cont. phys_addr=0x%x, " - "len=0x%x\n", - cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), - cpu_to_le32(sg_dma_len(s))); - } - remseg -= cnt; - dprintk(5, "qla1280_32bit_start_scsi: " - "continuation packet data - " - "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), - SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(1, + "S/G Segment Cont. phys_addr=0x%x, " + "len=0x%x\n", + cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), + cpu_to_le32(sg_dma_len(s))); } - } else { /* No S/G data transfer */ - dma_handle = pci_map_single(ha->pdev, - cmd->request_buffer, - cmd->request_bufflen, - cmd->sc_data_direction); - sp->saved_dma_handle = dma_handle; - - *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr = cpu_to_le32(cmd->request_bufflen); + remseg -= cnt; + dprintk(5, "qla1280_32bit_start_scsi: " + "continuation packet data - " + "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); } } else { /* No data transfer at all */ dprintk(5, "qla1280_32bit_start_scsi: No data, command " @@ -4086,9 +4031,9 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) for (i = 0; i < cmd->cmd_len; i++) { printk("0x%02x ", cmd->cmnd[i]); } - printk(" seg_cnt =%d\n", cmd->use_sg); + printk(" seg_cnt =%d\n", scsi_sg_count(cmd)); printk(" request buffer=0x%p, request buffer len=0x%x\n", - cmd->request_buffer, cmd->request_bufflen); + scsi_sglist(cmd), scsi_bufflen(cmd)); /* if (cmd->use_sg) { sg = (struct scatterlist *) cmd->request_buffer; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 5e46d842c6f..e606cf0a2eb 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -268,7 +268,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380", NULL)) { + 0, "Sun3SCSI-5380", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); @@ -310,7 +310,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NULL); + free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 7cb4a31453e..02d9727f017 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -230,7 +230,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt) ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380VME", NULL)) { + 0, "Sun3SCSI-5380VME", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); @@ -279,7 +279,7 @@ static int sun3scsi_detect(struct 
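The NCR5380-family changes here and in the surrounding hunks (mac_scsi, pas16, sun3_scsi, sun3_scsi_vme, t128) all enforce the same rule: free_irq() must be given the same dev_id cookie that was registered with request_irq(), otherwise the handler cannot be matched and is not released. A small sketch of the pairing, with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <scsi/scsi_host.h>

static irqreturn_t demo_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;

	pr_debug("irq %d for host %d\n", irq, instance->host_no);
	/* ... service the controller attached to this host ... */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct Scsi_Host *instance)
{
	/* the host instance doubles as the dev_id cookie */
	return request_irq(instance->irq, demo_intr, 0, "demo-5380", instance);
}

static void demo_teardown_irq(struct Scsi_Host *instance)
{
	/* must match the cookie passed to request_irq() above, not NULL */
	free_irq(instance->irq, instance);
}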
scsi_host_template * tpnt) int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NULL); + free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 248d60b8d89..041eaaace2c 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -259,7 +259,8 @@ found: instance->irq = NCR5380_probe_irq(instance, T128_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) { + if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", + instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -295,7 +296,7 @@ static int t128_release(struct Scsi_Host *shost) NCR5380_local_declare(); NCR5380_setup(shost); if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port);
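Back in the qla1280_return_status() hunk, the same accessor conversion covers residual handling: scsi_bufflen() and scsi_set_resid() replace direct use of request_bufflen and resid. A minimal sketch of that underrun check, with demo_handle_underrun() as a hypothetical stand-in for the CS_DATA_UNDERRUN case:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int demo_handle_underrun(struct scsi_cmnd *cmd, u32 residual)
{
	/* transfer fell short of the command's stated underflow limit */
	if (scsi_bufflen(cmd) - residual < cmd->underflow)
		return DID_ERROR;

	/* otherwise record the residual for the midlayer and succeed */
	scsi_set_resid(cmd, residual);
	return DID_OK;
}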