Diffstat (limited to 'drivers'): 997 files changed, 31058 insertions, 14660 deletions
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 7a0f4aa4fa1..9a62224cc27 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -38,6 +38,9 @@ #define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT +#undef PREFIX +#define PREFIX "ACPI:memory_hp:" + ACPI_MODULE_NAME("acpi_memhotplug"); MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); MODULE_DESCRIPTION("Hotplug Mem Driver"); @@ -153,6 +156,7 @@ acpi_memory_get_device(acpi_handle handle, acpi_handle phandle; struct acpi_device *device = NULL; struct acpi_device *pdevice = NULL; + int result; if (!acpi_bus_get_device(handle, &device) && device) @@ -165,9 +169,9 @@ acpi_memory_get_device(acpi_handle handle, } /* Get the parent device */ - status = acpi_bus_get_device(phandle, &pdevice); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Cannot get acpi bus device")); + result = acpi_bus_get_device(phandle, &pdevice); + if (result) { + printk(KERN_WARNING PREFIX "Cannot get acpi bus device"); return -EINVAL; } @@ -175,9 +179,9 @@ acpi_memory_get_device(acpi_handle handle, * Now add the notified device. This creates the acpi_device * and invokes .add function */ - status = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Cannot add acpi bus")); + result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); + if (result) { + printk(KERN_WARNING PREFIX "Cannot add acpi bus"); return -EINVAL; } @@ -238,7 +242,12 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) num_enabled++; continue; } - + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!info->length) + continue; if (node < 0) node = memory_add_physaddr_to_nid(info->start_addr); @@ -253,8 +262,15 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) mem_device->state = MEMORY_INVALID_STATE; return -EINVAL; } - - return result; + /* + * Sometimes the memory device will contain several memory blocks. + * When one memory block is hot-added to the system memory, it will + * be regarded as a success. + * Otherwise if the last memory block can't be hot-added to the system + * memory, it will be failure and the memory device can't be bound with + * driver. 
+ */ + return 0; } static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 544dcf83492..eb6f038b03d 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -97,6 +97,7 @@ #define AOPOBJ_OBJECT_INITIALIZED 0x08 #define AOPOBJ_SETUP_COMPLETE 0x10 #define AOPOBJ_SINGLE_DATUM 0x20 +#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */ /****************************************************************************** * diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 584d766e6f1..b79978f7bc7 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -397,6 +397,30 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node), extra_desc->extra.aml_length, extra_desc->extra.aml_start); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Validate the region address/length via the host OS */ + + status = acpi_os_validate_address(obj_desc->region.space_id, + obj_desc->region.address, + (acpi_size) obj_desc->region.length, + acpi_ut_get_node_name(node)); + + if (ACPI_FAILURE(status)) { + /* + * Invalid address/length. We will emit an error message and mark + * the region as invalid, so that it will cause an additional error if + * it is ever used. Then return AE_OK. + */ + ACPI_EXCEPTION((AE_INFO, status, + "During address validation of OpRegion [%4.4s]", + node->name.ascii)); + obj_desc->common.flags |= AOPOBJ_INVALID; + status = AE_OK; + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index d4075b82102..6687be167f5 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -113,6 +113,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, } } + /* Exit if Address/Length have been disallowed by the host OS */ + + if (rgn_desc->common.flags & AOPOBJ_INVALID) { + return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS); + } + /* * Exit now for SMBus address space, it has a non-linear address space * and the request cannot be directly validated diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 67340cc7014..257706e7734 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -70,6 +70,12 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc); + /* If Source and Target are the same, just return */ + + if (source_desc == target_desc) { + return_ACPI_STATUS(AE_OK); + } + /* We know that source_desc is a buffer by now */ buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer); @@ -161,6 +167,12 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc, ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc); + /* If Source and Target are the same, just return */ + + if (source_desc == target_desc) { + return_ACPI_STATUS(AE_OK); + } + /* We know that source_desc is a string by now */ buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 71670719d61..5691f165a95 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -189,11 +189,36 @@ acpi_status __init acpi_os_initialize(void) return AE_OK; } +static void bind_to_cpu0(struct work_struct *work) +{ + set_cpus_allowed(current, 
cpumask_of_cpu(0)); + kfree(work); +} + +static void bind_workqueue(struct workqueue_struct *wq) +{ + struct work_struct *work; + + work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); + INIT_WORK(work, bind_to_cpu0); + queue_work(wq, work); +} + acpi_status acpi_os_initialize1(void) { + /* + * On some machines, a software-initiated SMI causes corruption unless + * the SMI runs on CPU 0. An SMI can be initiated by any AML, but + * typically it's done in GPE-related methods that are run via + * workqueues, so we can avoid the known corruption cases by binding + * the workqueues to CPU 0. + */ kacpid_wq = create_singlethread_workqueue("kacpid"); + bind_workqueue(kacpid_wq); kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); + bind_workqueue(kacpi_notify_wq); kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); + bind_workqueue(kacpi_hotplug_wq); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); BUG_ON(!kacpi_hotplug_wq); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 8a5bf3b356f..55b5b90c2a4 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -395,7 +395,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) fn = adr & 0xffff; pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn)); - if (hnd == handle) + if (!pdev || hnd == handle) break; pbus = pdev->subordinate; diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 84e0f3c0744..2cc4b303387 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void) { int result = 0; + if (acpi_disabled) + return 0; + memset(&errata, 0, sizeof(errata)); #ifdef CONFIG_SMP @@ -1197,6 +1200,9 @@ out_proc: static void __exit acpi_processor_exit(void) { + if (acpi_disabled) + return; + acpi_processor_ppc_exit(); acpi_thermal_cpufreq_exit(); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 0efa59e7e3a..66393d5c4c7 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -162,8 +162,9 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr, pr->power.timer_broadcast_on_state = state; } -static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) +static void lapic_timer_propagate_broadcast(void *arg) { + struct acpi_processor *pr = (struct acpi_processor *) arg; unsigned long reason; reason = pr->power.timer_broadcast_on_state < INT_MAX ? 
@@ -635,7 +636,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) working++; } - lapic_timer_propagate_broadcast(pr); + smp_call_function_single(pr->id, lapic_timer_propagate_broadcast, + pr, 1); return (working); } diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 39838c66603..31adda1099e 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr) if (pr->limit.thermal.tx > tx) tx = pr->limit.thermal.tx; - result = acpi_processor_set_throttling(pr, tx); + result = acpi_processor_set_throttling(pr, tx, false); if (result) goto end; } @@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev, if (state <= max_pstate) { if (pr->flags.throttling && pr->throttling.state) - result = acpi_processor_set_throttling(pr, 0); + result = acpi_processor_set_throttling(pr, 0, false); cpufreq_set_cur_state(pr->id, state); } else { cpufreq_set_cur_state(pr->id, max_pstate); result = acpi_processor_set_throttling(pr, - state - max_pstate); + state - max_pstate, false); } return result; } diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 227543789ba..ae39797aab5 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -62,7 +62,8 @@ struct throttling_tstate { #define THROTTLING_POSTCHANGE (2) static int acpi_processor_get_throttling(struct acpi_processor *pr); -int acpi_processor_set_throttling(struct acpi_processor *pr, int state); +int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force); static int acpi_processor_update_tsd_coord(void) { @@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) */ target_state = throttling_limit; } - return acpi_processor_set_throttling(pr, target_state); + return acpi_processor_set_throttling(pr, target_state, false); } /* @@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) if (ret >= 0) { state = acpi_get_throttling_state(pr, value); if (state == -1) { - ACPI_WARNING((AE_INFO, - "Invalid throttling state, reset")); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Invalid throttling state, reset\n")); state = 0; - ret = acpi_processor_set_throttling(pr, state); + ret = acpi_processor_set_throttling(pr, state, true); if (ret) return ret; } @@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr) } static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, - int state) + int state, bool force) { u32 value = 0; u32 duty_mask = 0; @@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, if (!pr->flags.throttling) return -ENODEV; - if (state == pr->throttling.state) + if (!force && (state == pr->throttling.state)) return 0; if (state < pr->throttling_platform_limit) @@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, } static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, - int state) + int state, bool force) { int ret; acpi_integer value; @@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, if (!pr->flags.throttling) return -ENODEV; - if (state == pr->throttling.state) + if (!force && (state == pr->throttling.state)) return 0; if (state < pr->throttling_platform_limit) @@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct 
acpi_processor *pr, return 0; } -int acpi_processor_set_throttling(struct acpi_processor *pr, int state) +int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force) { cpumask_var_t saved_mask; int ret = 0; @@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) /* FIXME: use work_on_cpu() */ set_cpus_allowed_ptr(current, cpumask_of(pr->id)); ret = p_throttling->acpi_processor_set_throttling(pr, - t_state.target_state); + t_state.target_state, force); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, @@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) set_cpus_allowed_ptr(current, cpumask_of(i)); ret = match_pr->throttling. acpi_processor_set_throttling( - match_pr, t_state.target_state); + match_pr, t_state.target_state, force); } } /* @@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n", pr->throttling.state)); - result = acpi_processor_set_throttling(pr, 0); + result = acpi_processor_set_throttling(pr, 0, false); if (result) goto end; } @@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file, if (strcmp(tmpbuf, charp) != 0) return -EINVAL; - result = acpi_processor_set_throttling(pr, state_val); + result = acpi_processor_set_throttling(pr, state_val, false); if (result) return result; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 01574a06653..42159a28f43 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -397,6 +397,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { + .callback = init_set_sci_en_on_resume, + .ident = "Hewlett-Packard HP G7000 Notebook PC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"), + }, + }, + { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 0944daec064..9c61ab2177c 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c @@ -121,7 +121,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, table_attr->attr.size = 0; table_attr->attr.read = acpi_table_show; table_attr->attr.attr.name = table_attr->name; - table_attr->attr.attr.mode = 0444; + table_attr->attr.attr.mode = 0400; return; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8851315ce85..60ea984c84a 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -2004,8 +2004,11 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) status = acpi_remove_notify_handler(device->dev->handle, ACPI_DEVICE_NOTIFY, acpi_video_device_notify); - sysfs_remove_link(&device->backlight->dev.kobj, "device"); - backlight_device_unregister(device->backlight); + if (device->backlight) { + sysfs_remove_link(&device->backlight->dev.kobj, "device"); + backlight_device_unregister(device->backlight); + device->backlight = NULL; + } if (device->cdev) { sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling"); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 3d763fdf99b..24665067301 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -207,6 +207,16 @@ int amba_device_register(struct amba_device *dev, struct resource *parent) void __iomem *tmp; int i, ret; + device_initialize(&dev->dev); + + /* + * Copy from device_add + */ + if (dev->dev.init_name) { + 
dev_set_name(&dev->dev, "%s", dev->dev.init_name); + dev->dev.init_name = NULL; + } + dev->dev.release = amba_device_release; dev->dev.bus = &amba_bustype; dev->dev.dma_mask = &dev->dma_mask; @@ -240,7 +250,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent) goto err_release; } - ret = device_register(&dev->dev); + ret = device_add(&dev->dev); if (ret) goto err_release; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 15a23031833..fe3eba5d6b3 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -219,6 +219,8 @@ enum { AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as + link offline */ /* ap->flags bits */ @@ -513,11 +515,16 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ + { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ + { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1658,6 +1665,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, int (*check_ready)(struct ata_link *link)) { struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; const char *reason = NULL; unsigned long now, msecs; struct ata_taskfile tf; @@ -1696,12 +1704,21 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, /* wait for link to become ready */ rc = ata_wait_after_reset(link, deadline, check_ready); - /* link occupied, -ENODEV too is an error */ - if (rc) { + if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) { + /* + * Workaround for cases where link online status can't + * be trusted. Treat device readiness timeout as link + * offline. 
+ */ + ata_link_printk(link, KERN_INFO, + "device not ready, treating as offline\n"); + *class = ATA_DEV_NONE; + } else if (rc) { + /* link occupied, -ENODEV too is an error */ reason = "device not ready"; goto fail; - } - *class = ahci_dev_classify(ap); + } else + *class = ahci_dev_classify(ap); DPRINTK("EXIT, class=%u\n", *class); return 0; @@ -1768,7 +1785,8 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, irq_sts = readl(port_mmio + PORT_IRQ_STAT); if (irq_sts & PORT_IRQ_BAD_PMP) { ata_link_printk(link, KERN_WARNING, - "failed due to HW bug, retry pmp=0\n"); + "applying SB600 PMP SRST workaround " + "and retrying\n"); rc = ahci_do_softreset(link, class, 0, deadline, ahci_check_ready); } @@ -2721,6 +2739,56 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) return !ver || strcmp(ver, dmi->driver_data) < 0; } +static bool ahci_broken_online(struct pci_dev *pdev) +{ +#define ENCODE_BUSDEVFN(bus, slot, func) \ + (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) + static const struct dmi_system_id sysids[] = { + /* + * There are several gigabyte boards which use + * SIMG5723s configured as hardware RAID. Certain + * 5723 firmware revisions shipped there keep the link + * online but fail to answer properly to SRST or + * IDENTIFY when no device is attached downstream + * causing libata to retry quite a few times leading + * to excessive detection delay. + * + * As these firmwares respond to the second reset try + * with invalid device signature, considering unknown + * sig as offline works around the problem acceptably. + */ + { + .ident = "EP45-DQ6", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), + }, + .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), + }, + { + .ident = "EP45-DS5", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), + }, + .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), + }, + { } /* terminate list */ + }; +#undef ENCODE_BUSDEVFN + const struct dmi_system_id *dmi = dmi_first_match(sysids); + unsigned int val; + + if (!dmi) + return false; + + val = (unsigned long)dmi->driver_data; + + return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; @@ -2836,6 +2904,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "BIOS update required for suspend/resume\n"); } + if (ahci_broken_online(pdev)) { + hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE; + dev_info(&pdev->dev, + "online status unreliable, applying workaround\n"); + } + /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d0a14cf2bd7..9ac4e378992 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = { { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ + { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ + { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unkown HP */ { 0x27DF, 0x1071, 0xD221 
}, /* ICH7 on Hercules EC-900 */ + { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ @@ -661,6 +664,8 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) return ata_sff_prereset(link, deadline); } +static DEFINE_SPINLOCK(piix_lock); + /** * piix_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring @@ -674,8 +679,9 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) { - unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned long flags; + unsigned int pio = adev->pio_mode - XFER_PIO_0; unsigned int is_slave = (adev->devno != 0); unsigned int master_port= ap->port_no ? 0x42 : 0x40; unsigned int slave_port = 0x44; @@ -705,6 +711,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) if (adev->class == ATA_DEV_ATA) control |= 4; /* PPE enable */ + spin_lock_irqsave(&piix_lock, flags); + /* PIO configuration clears DTE unconditionally. It will be * programmed in set_dmamode which is guaranteed to be called * after set_piomode if any DMA mode is available. @@ -744,6 +752,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); pci_write_config_byte(dev, 0x48, udma_enable); } + + spin_unlock_irqrestore(&piix_lock, flags); } /** @@ -761,6 +771,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) { struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned long flags; u8 master_port = ap->port_no ? 0x42 : 0x40; u16 master_data; u8 speed = adev->dma_mode; @@ -774,6 +785,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in { 2, 1 }, { 2, 3 }, }; + spin_lock_irqsave(&piix_lock, flags); + pci_read_config_word(dev, master_port, &master_data); if (ap->udma_mask) pci_read_config_byte(dev, 0x48, &udma_enable); @@ -864,6 +877,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in /* Don't scribble on 0x48 if the controller does not support UDMA */ if (ap->udma_mask) pci_write_config_byte(dev, 0x48, udma_enable); + + spin_unlock_irqrestore(&piix_lock, flags); } /** diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 045a486a09e..072ba5ea138 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev) return rc; } + dev->n_native_sectors = native_sectors; /* nothing to do? 
*/ if (native_sectors <= sectors || !ata_ignore_hpa) { @@ -3392,17 +3393,27 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) static int ata_dev_set_mode(struct ata_device *dev) { + struct ata_port *ap = dev->link->ap; struct ata_eh_context *ehc = &dev->link->eh_context; + const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; const char *dev_err_whine = ""; int ign_dev_err = 0; - unsigned int err_mask; + unsigned int err_mask = 0; int rc; dev->flags &= ~ATA_DFLAG_PIO; if (dev->xfer_shift == ATA_SHIFT_PIO) dev->flags |= ATA_DFLAG_PIO; - err_mask = ata_dev_set_xfermode(dev); + if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) + dev_err_whine = " (SET_XFERMODE skipped)"; + else { + if (nosetxfer) + ata_dev_printk(dev, KERN_WARNING, + "NOSETXFER but PATA detected - can't " + "skip SETXFER, might malfunction\n"); + err_mask = ata_dev_set_xfermode(dev); + } if (err_mask & ~AC_ERR_DEV) goto fail; @@ -4089,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, unsigned int readid_flags) { u64 n_sectors = dev->n_sectors; + u64 n_native_sectors = dev->n_native_sectors; int rc; if (!ata_dev_enabled(dev)) @@ -4118,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, /* verify n_sectors hasn't changed */ if (dev->class == ATA_DEV_ATA && n_sectors && dev->n_sectors != n_sectors) { - ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " + ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch " "%llu != %llu\n", (unsigned long long)n_sectors, (unsigned long long)dev->n_sectors); - - /* restore original n_sectors */ - dev->n_sectors = n_sectors; - - rc = -ENODEV; - goto fail; + /* + * Something could have caused HPA to be unlocked + * involuntarily. If n_native_sectors hasn't changed + * and the new size matches it, keep the device. + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors > n_sectors && + dev->n_sectors == n_native_sectors) { + ata_dev_printk(dev, KERN_WARNING, + "new n_sectors matches native, probably " + "late HPA unlock, continuing\n"); + /* keep using the old n_sectors */ + dev->n_sectors = n_sectors; + } else { + /* restore original n_[native]_sectors and fail */ + dev->n_native_sectors = n_native_sectors; + dev->n_sectors = n_sectors; + rc = -ENODEV; + goto fail; + } } return 0; @@ -4276,6 +4302,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, + /* this one allows HPA unlocking but fails IOs on the area */ + { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, + /* Devices which report 1 sector over size HPA */ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, @@ -4297,6 +4326,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, + /* + * Devices which choke on SETXFER. Applies only if both the + * device and controller are SATA. 
+ */ + { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, + /* End Marker */ { } }; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index fa22f94ca41..79711b64054 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify, struct ata_port *ap = link->ap; struct ata_link *slave = ap->slave_link; struct ata_eh_context *ehc = &link->eh_context; - struct ata_eh_context *sehc = &slave->eh_context; + struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; unsigned int *classes = ehc->classes; unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); @@ -2517,6 +2517,10 @@ int ata_eh_reset(struct ata_link *link, int classify, ata_eh_about_to_do(link, NULL, ATA_EH_RESET); rc = ata_do_reset(link, reset, classes, deadline, true); + if (rc) { + failed_link = link; + goto fail; + } } } else { if (verbose) diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 4b27617be26..41c94b1ae49 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c @@ -26,9 +26,7 @@ #include <linux/platform_device.h> #include <linux/ata_platform.h> -#include <mach/at91sam9260_matrix.h> #include <mach/at91sam9_smc.h> -#include <mach/at91sam9260.h> #include <mach/board.h> #include <mach/gpio.h> @@ -44,65 +42,62 @@ struct at91_ide_info { unsigned long mode; unsigned int cs; + struct clk *mck; + void __iomem *ide_addr; void __iomem *alt_addr; }; -const struct ata_timing initial_timing = +static const struct ata_timing initial_timing = {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; -static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz) +static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) { unsigned long mul; - /* - * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = - * x * (f / 1_000_000_000) = - * x * ((f * 65536) / 1_000_000_000) / 65536 = - * x * (((f / 10_000) * 65536) / 100_000) / 65536 = - */ + /* + * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = + * x * (f / 1_000_000_000) = + * x * ((f * 65536) / 1_000_000_000) / 65536 = + * x * (((f / 10_000) * 65536) / 100_000) / 65536 = + */ - mul = (mck_hz / 10000) << 16; - mul /= 100000; + mul = (mck_hz / 10000) << 16; + mul /= 100000; - return (ns * mul + 65536) >> 16; /* rounding */ + return (ns * mul + 65536) >> 16; /* rounding */ } static void set_smc_mode(struct at91_ide_info *info) { - at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); - return; + at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); + return; } static void set_smc_timing(struct device *dev, struct at91_ide_info *info, const struct ata_timing *ata) { - int read_cycle, write_cycle, active, recover; - int nrd_setup, nrd_pulse, nrd_recover; - int nwe_setup, nwe_pulse; + unsigned long read_cycle, write_cycle, active, recover; + unsigned long nrd_setup, nrd_pulse, nrd_recover; + unsigned long nwe_setup, nwe_pulse; - int ncs_write_setup, ncs_write_pulse; - int ncs_read_setup, ncs_read_pulse; + unsigned long ncs_write_setup, ncs_write_pulse; + unsigned long ncs_read_setup, ncs_read_pulse; - unsigned int mck_hz; - struct clk *mck; + unsigned long mck_hz; read_cycle = ata->cyc8b; nrd_setup = ata->setup; nrd_pulse = ata->act8b; nrd_recover = ata->rec8b; - mck = clk_get(NULL, "mck"); - BUG_ON(IS_ERR(mck)); - mck_hz = clk_get_rate(mck); + mck_hz = clk_get_rate(info->mck); read_cycle = calc_mck_cycles(read_cycle, mck_hz); nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); nrd_pulse = calc_mck_cycles(nrd_pulse, 
mck_hz); nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); - clk_put(mck); - active = nrd_setup + nrd_pulse; recover = read_cycle - active; @@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev, ncs_write_setup = ncs_read_setup; ncs_write_pulse = ncs_read_pulse; - dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n", + dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n", nrd_setup, nrd_pulse, read_cycle); - dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n", + dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n", nwe_setup, nwe_pulse, write_cycle); - dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n", + dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n", ncs_read_setup, ncs_read_pulse); - dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n", + dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n", ncs_write_setup, ncs_write_pulse); at91_sys_write(AT91_SMC_SETUP(info->cs), @@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) struct resource *mem_res; struct ata_host *host; struct ata_port *ap; + int irq_flags = 0; int irq = 0; int ret; @@ -254,13 +250,20 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) ata_port_desc(ap, "no IRQ, using PIO polling"); } - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) { dev_err(dev, "failed to allocate memory for private data\n"); return -ENOMEM; } + info->mck = clk_get(NULL, "mck"); + + if (IS_ERR(info->mck)) { + dev_err(dev, "failed to get access to mck clock\n"); + return -ENODEV; + } + info->cs = board->chipselect; info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | @@ -272,7 +275,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) if (!info->ide_addr) { dev_err(dev, "failed to map IO base\n"); ret = -ENOMEM; - goto err_ide_ioremap; + goto err_put; } info->alt_addr = devm_ioremap(dev, @@ -281,7 +284,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) if (!info->alt_addr) { dev_err(dev, "failed to map CTL base\n"); ret = -ENOMEM; - goto err_alt_ioremap; + goto err_put; } ap->ioaddr.cmd_addr = info->ide_addr; @@ -300,33 +303,27 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) irq ? 
ata_sff_interrupt : NULL, irq_flags, &pata_at91_sht); -err_alt_ioremap: - devm_iounmap(dev, info->ide_addr); - -err_ide_ioremap: - kfree(info); - +err_put: + clk_put(info->mck); return ret; } static int __devexit pata_at91_remove(struct platform_device *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); - struct at91_ide_info *info = host->private_data; - struct device *dev = &pdev->dev; + struct at91_ide_info *info; if (!host) return 0; + info = host->private_data; ata_host_detach(host); if (!info) return 0; - devm_iounmap(dev, info->ide_addr); - devm_iounmap(dev, info->alt_addr); + clk_put(info->mck); - kfree(info); return 0; } diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index bec0b8ade66..45915566e4e 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -1,6 +1,7 @@ /* * pata_atiixp.c - ATI PATA for new ATA layer * (C) 2005 Red Hat Inc + * (C) 2009 Bartlomiej Zolnierkiewicz * * Based on * @@ -61,20 +62,19 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = 2 * ap->port_no + adev->devno; - - /* Check this is correct - the order is odd in both drivers */ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); - u16 pio_mode_data, pio_timing_data; + u32 pio_timing_data; + u16 pio_mode_data; pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); pio_mode_data &= ~(0x7 << (4 * dn)); pio_mode_data |= pio << (4 * dn); pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); - pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); + pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); pio_timing_data &= ~(0xFF << timing_shift); pio_timing_data |= (pio_timings[pio] << timing_shift); - pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); + pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); } /** @@ -119,16 +119,17 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) udma_mode_data |= dma << (4 * dn); pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); } else { - u16 mwdma_timing_data; - /* Check this is correct - the order is odd in both drivers */ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); + u32 mwdma_timing_data; dma -= XFER_MW_DMA_0; - pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data); + pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, + &mwdma_timing_data); mwdma_timing_data &= ~(0xFF << timing_shift); mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); - pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data); + pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, + mwdma_timing_data); } /* * We must now look at the PIO mode situation. 
We may need to diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 8d9343accf3..abdd19fe990 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) ap = host->ports[i]; ocd = ap->dev->platform_data; - if (!ap || (ap->flags & ATA_FLAG_DISABLED)) + + if (ap->flags & ATA_FLAG_DISABLED) continue; ocd = ap->dev->platform_data; diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index f4d009ed50a..dc99e26f8e5 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), + PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 23714aefb82..c19417e0220 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) char *when = "idle"; ata_ehi_clear_desc(ehi); - if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { + if (ap->flags & ATA_FLAG_DISABLED) { when = "disabled"; } else if (edma_was_enabled) { when = "EDMA enabled"; diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index b2d11f300c3..86a40582999 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -602,6 +602,7 @@ MODULE_VERSION(DRV_VERSION); static int adma_enabled; static int swncq_enabled = 1; +static int msi_enabled; static void nv_adma_register_mode(struct ata_port *ap) { @@ -2459,6 +2460,11 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (type == SWNCQ) nv_swncq_host_init(host); + if (msi_enabled) { + dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n"); + pci_enable_msi(pdev); + } + pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ipriv->irq_handler, IRQF_SHARED, ipriv->sht); @@ -2558,4 +2564,6 @@ module_param_named(adma, adma_enabled, bool, 0444); MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); module_param_named(swncq, swncq_enabled, bool, 0444); MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); +module_param_named(msi, msi_enabled, bool, 0444); +MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 030ec079b18..35bd5cc7f28 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance) struct ata_port *ap = host->ports[i]; u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); - if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) + if (unlikely(ap->flags & ATA_FLAG_DISABLED)) continue; /* turn off SATA_IRQ if not supported */ diff --git a/drivers/base/devres.c b/drivers/base/devres.c index e8beb8e5b62..05dd307e8f0 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -428,6 +428,9 @@ int devres_release_all(struct device *dev) { unsigned long flags; + 
/* Looks like an uninitialized device structure */ + if (WARN_ON(dev->devres_head.next == NULL)) + return -ENODEV; spin_lock_irqsave(&dev->devres_lock, flags); return release_nodes(dev, dev->devres_head.next, &dev->devres_head, flags); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index ddeb819c8f8..7376367bcb8 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev, goto err; } /* Pages will be freed by vfree() */ - fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; complete(&fw_priv->completion); @@ -217,8 +216,10 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, ret_count = -ENODEV; goto out; } - if (offset > fw->size) - return 0; + if (offset > fw->size) { + ret_count = 0; + goto out; + } if (count > fw->size - offset) count = fw->size - offset; @@ -357,7 +358,7 @@ static void fw_dev_release(struct device *dev) kfree(fw_priv->pages); kfree(fw_priv->fw_id); kfree(fw_priv); - put_device(dev); + kfree(dev); module_put(THIS_MODULE); } @@ -408,13 +409,11 @@ static int fw_register_device(struct device **dev_p, const char *fw_name, if (retval) { dev_err(device, "%s: device_register failed\n", __func__); put_device(f_dev); - goto error_kfree_fw_id; + return retval; } *dev_p = f_dev; return 0; -error_kfree_fw_id: - kfree(fw_priv->fw_id); error_kfree: kfree(f_dev); kfree(fw_priv); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 81cb01bfc35..456594bd97b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -483,9 +483,6 @@ int platform_driver_register(struct platform_driver *drv) drv->driver.remove = platform_drv_remove; if (drv->shutdown) drv->driver.shutdown = platform_drv_shutdown; - if (drv->suspend || drv->resume) - pr_warning("Platform driver '%s' needs updating - please use " - "dev_pm_ops\n", drv->driver.name); return driver_register(&drv->driver); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index fae72545898..58a3e572f2c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -762,6 +762,7 @@ static int dpm_prepare(pm_message_t state) dev->power.status = DPM_ON; if (error == -EAGAIN) { put_device(dev); + error = 0; continue; } printk(KERN_ERR "PM: Failed to prepare device %s " diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 79a9ae5238a..0d903909af7 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev) drv->add(sysdev); } mutex_unlock(&sysdev_drivers_lock); + kobject_uevent(&sysdev->kobj, KOBJ_ADD); } - kobject_uevent(&sysdev->kobj, KOBJ_ADD); return error; } diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 668dc234b8e..1e6b7c14f69 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -36,6 +36,7 @@ #include <linux/ioport.h> #include <linux/mm.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/proc_fs.h> #include <linux/reboot.h> #include <linux/spinlock.h> diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index bb72ada9f07..1d886e079c5 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -298,6 +298,22 @@ config BLK_DEV_NBD If unsure, say N. +config BLK_DEV_OSD + tristate "OSD object-as-blkdev support" + depends on SCSI_OSD_ULD + ---help--- + Saying Y or M here will allow the exporting of a single SCSI + OSD (object-based storage) object as a Linux block device. 
+ + For example, if you create a 2G object on an OSD device, + you can then use this module to present that 2G object as + a Linux block device. + + To compile this driver as a module, choose M here: the + module will be called osdblk. + + If unsure, say N. + config BLK_DEV_SX8 tristate "Promise SATA SX8 support" depends on PCI diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 7755a5e2a85..cdaa3f8fddf 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_MG_DISK) += mg_disk.o obj-$(CONFIG_SUNVDC) += sunvdc.o +obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o obj-$(CONFIG_BLK_DEV_UMEM) += umem.o obj-$(CONFIG_BLK_DEV_NBD) += nbd.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 9c6e5b0fe89..2f07b7c99a9 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1645,7 +1645,7 @@ static int __init fd_probe_drives(void) { int drive,drives,nomem; - printk(KERN_INFO "FD: probing units\n" KERN_INFO "found "); + printk(KERN_INFO "FD: probing units\nfound "); drives=0; nomem=0; for(drive=0;drive<FD_MAX_UNITS;drive++) { diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 5e41e6dd657..db195abad69 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -155,7 +155,7 @@ struct aoedev { u16 fw_ver; /* version of blade's firmware */ struct work_struct work;/* disk create work struct */ struct gendisk *gd; - struct request_queue blkq; + struct request_queue *blkq; struct hd_geometry geo; sector_t ssize; struct timer_list timer; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 2307a271bdc..1e15889c4b9 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -264,9 +264,12 @@ aoeblk_gdalloc(void *vp) goto err_disk; } - blk_queue_make_request(&d->blkq, aoeblk_make_request); - if (bdi_init(&d->blkq.backing_dev_info)) + d->blkq = blk_alloc_queue(GFP_KERNEL); + if (!d->blkq) goto err_mempool; + blk_queue_make_request(d->blkq, aoeblk_make_request); + if (bdi_init(&d->blkq->backing_dev_info)) + goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; @@ -276,7 +279,7 @@ aoeblk_gdalloc(void *vp) snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", d->aoemajor, d->aoeminor); - gd->queue = &d->blkq; + gd->queue = d->blkq; d->gd = gd; d->flags &= ~DEVFL_GDALLOC; d->flags |= DEVFL_UP; @@ -287,6 +290,9 @@ aoeblk_gdalloc(void *vp) aoedisk_add_sysfs(d); return; +err_blkq: + blk_cleanup_queue(d->blkq); + d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index eeea477d960..fa67027789a 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d) if (d->bufpool) mempool_destroy(d->bufpool); skbpoolfree(d); + blk_cleanup_queue(d->blkq); kfree(d); } diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index f5e7180d7f4..3ff02941b3d 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1627,7 +1627,7 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, drive, dtp->blocks, dtp->spt, dtp->stretch); /* sanity check */ - if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 || + if (setprm.track != dtp->blocks/dtp->spt/2 || setprm.head != 2) { redo_fd_request(); return -EINVAL; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 
c7a527c08a0..a52cc7fe45e 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/fs.h> @@ -226,8 +227,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c) static inline void removeQ(CommandList_struct *c) { - if (WARN_ON(hlist_unhashed(&c->list))) + /* + * After kexec/dump some commands might still + * be in flight, which the firmware will try + * to complete. Resetting the firmware doesn't work + * with old fw revisions, so we have to mark + * them off as 'stale' to prevent the driver from + * falling over. + */ + if (WARN_ON(hlist_unhashed(&c->list))) { + c->cmd_type = CMD_MSG_STALE; return; + } hlist_del_init(&c->list); } @@ -4246,7 +4257,8 @@ static void fail_all_cmds(unsigned long ctlr) while (!hlist_empty(&h->cmpQ)) { c = hlist_entry(h->cmpQ.first, CommandList_struct, list); removeQ(c); - c->err_info->CommandStatus = CMD_HARDWARE_ERR; + if (c->cmd_type != CMD_MSG_STALE) + c->err_info->CommandStatus = CMD_HARDWARE_ERR; if (c->cmd_type == CMD_RWREQ) { complete_command(h, c, 0); } else if (c->cmd_type == CMD_IOCTL_PEND) diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index cd665b00c7c..dbaed1ea0da 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h @@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct { #define CMD_SCSI 0x03 #define CMD_MSG_DONE 0x04 #define CMD_MSG_TIMEOUT 0x05 +#define CMD_MSG_STALE 0xff /* This structure needs to be divisible by 8 for new * indexing method. diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 862b40c9018..91b75301378 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&open_lock); - LOCK_FDC(drive, 1); + if (lock_fdc(drive, 1)) { + mutex_unlock(&open_lock); + return -EINTR; + } floppy_type[type] = *g; floppy_type[type].name = "user format"; for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 801f4ab8330..5757188cd1f 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -61,7 +61,6 @@ #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/loop.h> diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index f703f547824..6d7fbaa9224 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -36,7 +36,6 @@ /* Register offsets */ #define MG_BUFF_OFFSET 0x8000 -#define MG_STORAGE_BUFFER_SIZE 0x200 #define MG_REG_OFFSET 0xC000 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ @@ -219,6 +218,16 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) host->error = MG_ERR_NONE; expire = jiffies + msecs_to_jiffies(msec); + /* These 2 times dummy status read prevents reading invalid + * status. A very little time (3 times of mflash operating clk) + * is required for busy bit is set. Use dummy read instead of + * busy wait, because mflash's PLL is machine dependent. 
+ */ + if (prv_data->use_polling) { + status = inb((unsigned long)host->dev_base + MG_REG_STATUS); + status = inb((unsigned long)host->dev_base + MG_REG_STATUS); + } + status = inb((unsigned long)host->dev_base + MG_REG_STATUS); do { @@ -245,8 +254,6 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) mg_dump_status("not ready", status, host); return MG_ERR_INV_STAT; } - if (prv_data->use_polling) - msleep(1); status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } while (time_before(cur_jiffies, expire)); @@ -469,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host, return MG_ERR_NONE; } +static void mg_read_one(struct mg_host *host, struct request *req) +{ + u16 *buff = (u16 *)req->buffer; + u32 i; + + for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) + *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + + (i << 1)); +} + static void mg_read(struct request *req) { - u32 j; struct mg_host *host = req->rq_disk->private_data; if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), @@ -482,49 +498,65 @@ static void mg_read(struct request *req) blk_rq_sectors(req), blk_rq_pos(req), req->buffer); do { - u16 *buff = (u16 *)req->buffer; - if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } - for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) - *buff++ = inw((unsigned long)host->dev_base + - MG_BUFF_OFFSET + (j << 1)); + + mg_read_one(host, req); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } +static void mg_write_one(struct mg_host *host, struct request *req) +{ + u16 *buff = (u16 *)req->buffer; + u32 i; + + for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) + outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET + + (i << 1)); +} + static void mg_write(struct request *req) { - u32 j; struct mg_host *host = req->rq_disk->private_data; + unsigned int rem = blk_rq_sectors(req); - if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), + if (mg_out(host, blk_rq_pos(req), rem, MG_CMD_WR, NULL) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - blk_rq_sectors(req), blk_rq_pos(req), req->buffer); + rem, blk_rq_pos(req), req->buffer); + + if (mg_wait(host, ATA_DRQ, + MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { + mg_bad_rw_intr(host); + return; + } do { - u16 *buff = (u16 *)req->buffer; + mg_write_one(host, req); - if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { + outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + + MG_REG_COMMAND); + + rem--; + if (rem > 1 && mg_wait(host, ATA_DRQ, + MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { + mg_bad_rw_intr(host); + return; + } else if (mg_wait(host, MG_STAT_READY, + MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } - for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) - outw(*buff++, (unsigned long)host->dev_base + - MG_BUFF_OFFSET + (j << 1)); - - outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + - MG_REG_COMMAND); } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } @@ -532,7 +564,6 @@ static void mg_read_intr(struct mg_host *host) { struct request *req = host->req; u32 i; - u16 *buff; /* check status */ do { @@ -550,13 +581,7 @@ static void mg_read_intr(struct mg_host *host) return; ok_to_read: - /* get current segment of request */ - buff = (u16 *)req->buffer; - - /* read 1 sector */ - for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) - *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + - (i << 1)); + mg_read_one(host, req); 
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); @@ -575,8 +600,7 @@ ok_to_read: static void mg_write_intr(struct mg_host *host) { struct request *req = host->req; - u32 i, j; - u16 *buff; + u32 i; bool rem; /* check status */ @@ -597,12 +621,7 @@ static void mg_write_intr(struct mg_host *host) ok_to_write: if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { /* write 1 sector and set handler if remains */ - buff = (u16 *)req->buffer; - for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { - outw(*buff, (unsigned long)host->dev_base + - MG_BUFF_OFFSET + (j << 1)); - buff++; - } + mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req), req->buffer); host->mg_do_intr = mg_write_intr; @@ -667,9 +686,6 @@ static unsigned int mg_issue_req(struct request *req, unsigned int sect_num, unsigned int sect_cnt) { - u16 *buff; - u32 i; - switch (rq_data_dir(req)) { case READ: if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) @@ -693,12 +709,7 @@ static unsigned int mg_issue_req(struct request *req, mg_bad_rw_intr(host); return host->error; } - buff = (u16 *)req->buffer; - for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { - outw(*buff, (unsigned long)host->dev_base + - MG_BUFF_OFFSET + (i << 1)); - buff++; - } + mg_write_one(host, req); mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c new file mode 100644 index 00000000000..13c1aee6aa3 --- /dev/null +++ b/drivers/block/osdblk.c @@ -0,0 +1,701 @@ + +/* + osdblk.c -- Export a single SCSI OSD object as a Linux block device + + + Copyright 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + + + Instructions for use + -------------------- + + 1) Map a Linux block device to an existing OSD object. + + In this example, we will use partition id 1234, object id 5678, + OSD device /dev/osd1. + + $ echo "1234 5678 /dev/osd1" > /sys/class/osdblk/add + + + 2) List all active blkdev<->object mappings. + + In this example, we have performed step #1 twice, creating two blkdevs, + mapped to two separate OSD objects. + + $ cat /sys/class/osdblk/list + 0 174 1234 5678 /dev/osd1 + 1 179 1994 897123 /dev/osd0 + + The columns, in order, are: + - blkdev unique id + - blkdev assigned major + - OSD object partition id + - OSD object id + - OSD device + + + 3) Remove an active blkdev<->object mapping. + + In this example, we remove the mapping with blkdev unique id 1. + + $ echo 1 > /sys/class/osdblk/remove + + + NOTE: The actual creation and deletion of OSD objects is outside the scope + of this driver. 
+ + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <scsi/osd_initiator.h> +#include <scsi/osd_attributes.h> +#include <scsi/osd_sec.h> +#include <scsi/scsi_device.h> + +#define DRV_NAME "osdblk" +#define PFX DRV_NAME ": " + +/* #define _OSDBLK_DEBUG */ +#ifdef _OSDBLK_DEBUG +#define OSDBLK_DEBUG(fmt, a...) \ + printk(KERN_NOTICE "osdblk @%s:%d: " fmt, __func__, __LINE__, ##a) +#else +#define OSDBLK_DEBUG(fmt, a...) \ + do { if (0) printk(fmt, ##a); } while (0) +#endif + +MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); +MODULE_DESCRIPTION("block device inside an OSD object osdblk.ko"); +MODULE_LICENSE("GPL"); + +struct osdblk_device; + +enum { + OSDBLK_MINORS_PER_MAJOR = 256, /* max minors per blkdev */ + OSDBLK_MAX_REQ = 32, /* max parallel requests */ + OSDBLK_OP_TIMEOUT = 4 * 60, /* sync OSD req timeout */ +}; + +struct osdblk_request { + struct request *rq; /* blk layer request */ + struct bio *bio; /* cloned bio */ + struct osdblk_device *osdev; /* associated blkdev */ +}; + +struct osdblk_device { + int id; /* blkdev unique id */ + + int major; /* blkdev assigned major */ + struct gendisk *disk; /* blkdev's gendisk and rq */ + struct request_queue *q; + + struct osd_dev *osd; /* associated OSD */ + + char name[32]; /* blkdev name, e.g. osdblk34 */ + + spinlock_t lock; /* queue lock */ + + struct osd_obj_id obj; /* OSD partition, obj id */ + uint8_t obj_cred[OSD_CAP_LEN]; /* OSD cred */ + + struct osdblk_request req[OSDBLK_MAX_REQ]; /* request table */ + + struct list_head node; + + char osd_path[0]; /* OSD device path */ +}; + +static struct class *class_osdblk; /* /sys/class/osdblk */ +static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ +static LIST_HEAD(osdblkdev_list); + +static struct block_device_operations osdblk_bd_ops = { + .owner = THIS_MODULE, +}; + +static const struct osd_attr g_attr_logical_length = ATTR_DEF( + OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8); + +static void osdblk_make_credential(u8 cred_a[OSD_CAP_LEN], + const struct osd_obj_id *obj) +{ + osd_sec_init_nosec_doall_caps(cred_a, obj, false, true); +} + +/* copied from exofs; move to libosd? */ +/* + * Perform a synchronous OSD operation. copied from exofs; move to libosd? + */ +static int osd_sync_op(struct osd_request *or, int timeout, uint8_t *credential) +{ + int ret; + + or->timeout = timeout; + ret = osd_finalize_request(or, 0, credential, NULL); + if (ret) + return ret; + + ret = osd_execute_request(or); + + /* osd_req_decode_sense(or, ret); */ + return ret; +} + +/* + * Perform an asynchronous OSD operation. copied from exofs; move to libosd? + */ +static int osd_async_op(struct osd_request *or, osd_req_done_fn *async_done, + void *caller_context, u8 *cred) +{ + int ret; + + ret = osd_finalize_request(or, 0, cred, NULL); + if (ret) + return ret; + + ret = osd_execute_request_async(or, async_done, caller_context); + + return ret; +} + +/* copied from exofs; move to libosd? 
*/ +static int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr) +{ + struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */ + void *iter = NULL; + int nelem; + + do { + nelem = 1; + osd_req_decode_get_attr_list(or, &cur_attr, &nelem, &iter); + if ((cur_attr.attr_page == attr->attr_page) && + (cur_attr.attr_id == attr->attr_id)) { + attr->len = cur_attr.len; + attr->val_ptr = cur_attr.val_ptr; + return 0; + } + } while (iter); + + return -EIO; +} + +static int osdblk_get_obj_size(struct osdblk_device *osdev, u64 *size_out) +{ + struct osd_request *or; + struct osd_attr attr; + int ret; + + /* start request */ + or = osd_start_request(osdev->osd, GFP_KERNEL); + if (!or) + return -ENOMEM; + + /* create a get-attributes(length) request */ + osd_req_get_attributes(or, &osdev->obj); + + osd_req_add_get_attr_list(or, &g_attr_logical_length, 1); + + /* execute op synchronously */ + ret = osd_sync_op(or, OSDBLK_OP_TIMEOUT, osdev->obj_cred); + if (ret) + goto out; + + /* extract length from returned attribute info */ + attr = g_attr_logical_length; + ret = extract_attr_from_req(or, &attr); + if (ret) + goto out; + + *size_out = get_unaligned_be64(attr.val_ptr); + +out: + osd_end_request(or); + return ret; + +} + +static void osdblk_osd_complete(struct osd_request *or, void *private) +{ + struct osdblk_request *orq = private; + struct osd_sense_info osi; + int ret = osd_req_decode_sense(or, &osi); + + if (ret) { + ret = -EIO; + OSDBLK_DEBUG("osdblk_osd_complete with err=%d\n", ret); + } + + /* complete OSD request */ + osd_end_request(or); + + /* complete request passed to osdblk by block layer */ + __blk_end_request_all(orq->rq, ret); +} + +static void bio_chain_put(struct bio *chain) +{ + struct bio *tmp; + + while (chain) { + tmp = chain; + chain = chain->bi_next; + + bio_put(tmp); + } +} + +static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask) +{ + struct bio *tmp, *new_chain = NULL, *tail = NULL; + + while (old_chain) { + tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); + if (!tmp) + goto err_out; + + __bio_clone(tmp, old_chain); + tmp->bi_bdev = NULL; + gfpmask &= ~__GFP_WAIT; + tmp->bi_next = NULL; + + if (!new_chain) + new_chain = tail = tmp; + else { + tail->bi_next = tmp; + tail = tmp; + } + + old_chain = old_chain->bi_next; + } + + return new_chain; + +err_out: + OSDBLK_DEBUG("bio_chain_clone with err\n"); + bio_chain_put(new_chain); + return NULL; +} + +static void osdblk_rq_fn(struct request_queue *q) +{ + struct osdblk_device *osdev = q->queuedata; + + while (1) { + struct request *rq; + struct osdblk_request *orq; + struct osd_request *or; + struct bio *bio; + bool do_write, do_flush; + + /* peek at request from block layer */ + rq = blk_fetch_request(q); + if (!rq) + break; + + /* filter out block requests we don't understand */ + if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) { + blk_end_request_all(rq, 0); + continue; + } + + /* deduce our operation (read, write, flush) */ + /* I wish the block layer simplified cmd_type/cmd_flags/cmd[] + * into a clearly defined set of RPC commands: + * read, write, flush, scsi command, power mgmt req, + * driver-specific, etc. 
+ */ + + do_flush = (rq->special == (void *) 0xdeadbeefUL); + do_write = (rq_data_dir(rq) == WRITE); + + if (!do_flush) { /* osd_flush does not use a bio */ + /* a bio clone to be passed down to OSD request */ + bio = bio_chain_clone(rq->bio, GFP_ATOMIC); + if (!bio) + break; + } else + bio = NULL; + + /* alloc internal OSD request, for OSD command execution */ + or = osd_start_request(osdev->osd, GFP_ATOMIC); + if (!or) { + bio_chain_put(bio); + OSDBLK_DEBUG("osd_start_request with err\n"); + break; + } + + orq = &osdev->req[rq->tag]; + orq->rq = rq; + orq->bio = bio; + orq->osdev = osdev; + + /* init OSD command: flush, write or read */ + if (do_flush) + osd_req_flush_object(or, &osdev->obj, + OSD_CDB_FLUSH_ALL, 0, 0); + else if (do_write) + osd_req_write(or, &osdev->obj, blk_rq_pos(rq) * 512ULL, + bio, blk_rq_bytes(rq)); + else + osd_req_read(or, &osdev->obj, blk_rq_pos(rq) * 512ULL, + bio, blk_rq_bytes(rq)); + + OSDBLK_DEBUG("%s 0x%x bytes at 0x%llx\n", + do_flush ? "flush" : do_write ? + "write" : "read", blk_rq_bytes(rq), + blk_rq_pos(rq) * 512ULL); + + /* begin OSD command execution */ + if (osd_async_op(or, osdblk_osd_complete, orq, + osdev->obj_cred)) { + osd_end_request(or); + blk_requeue_request(q, rq); + bio_chain_put(bio); + OSDBLK_DEBUG("osd_execute_request_async with err\n"); + break; + } + + /* remove the special 'flush' marker, now that the command + * is executing + */ + rq->special = NULL; + } +} + +static void osdblk_prepare_flush(struct request_queue *q, struct request *rq) +{ + /* add driver-specific marker, to indicate that this request + * is a flush command + */ + rq->special = (void *) 0xdeadbeefUL; +} + +static void osdblk_free_disk(struct osdblk_device *osdev) +{ + struct gendisk *disk = osdev->disk; + + if (!disk) + return; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (disk->queue) + blk_cleanup_queue(disk->queue); + put_disk(disk); +} + +static int osdblk_init_disk(struct osdblk_device *osdev) +{ + struct gendisk *disk; + struct request_queue *q; + int rc; + u64 obj_size = 0; + + /* contact OSD, request size info about the object being mapped */ + rc = osdblk_get_obj_size(osdev, &obj_size); + if (rc) + return rc; + + /* create gendisk info */ + disk = alloc_disk(OSDBLK_MINORS_PER_MAJOR); + if (!disk) + return -ENOMEM; + + sprintf(disk->disk_name, DRV_NAME "%d", osdev->id); + disk->major = osdev->major; + disk->first_minor = 0; + disk->fops = &osdblk_bd_ops; + disk->private_data = osdev; + + /* init rq */ + q = blk_init_queue(osdblk_rq_fn, &osdev->lock); + if (!q) { + put_disk(disk); + return -ENOMEM; + } + + /* switch queue to TCQ mode; allocate tag map */ + rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL); + if (rc) { + blk_cleanup_queue(q); + put_disk(disk); + return rc; + } + + /* Set our limits to the lower device limits, because osdblk cannot + * sleep when allocating a lower-request and therefore cannot be + * bouncing. 
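bio_chain_clone() and bio_chain_put() above follow a clone-or-unwind pattern: build the new chain node by node and, if any allocation fails, free whatever was built so far before returning NULL. A standalone sketch of the same shape with a plain linked-list type (hypothetical, not struct bio):

/*
 * Standalone sketch of the clone-or-unwind pattern used by
 * bio_chain_clone()/bio_chain_put(); 'node' is a stand-in type.
 */
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

static void chain_put(struct node *chain)
{
        while (chain) {
                struct node *tmp = chain;

                chain = chain->next;
                free(tmp);
        }
}

static struct node *chain_clone(const struct node *old)
{
        struct node *new_chain = NULL, *tail = NULL;

        for (; old; old = old->next) {
                struct node *tmp = malloc(sizeof(*tmp));

                if (!tmp)
                        goto err_out;
                tmp->val = old->val;
                tmp->next = NULL;
                if (!new_chain)
                        new_chain = tail = tmp;
                else {
                        tail->next = tmp;
                        tail = tmp;
                }
        }
        return new_chain;

err_out:
        chain_put(new_chain);   /* free the partial clone */
        return NULL;
}

int main(void)
{
        struct node a = { 1, NULL }, b = { 2, NULL };
        struct node *copy;

        a.next = &b;
        copy = chain_clone(&a);
        chain_put(copy);
        return 0;
}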
+ */ + blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); + + blk_queue_prep_rq(q, blk_queue_start_tag); + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush); + + disk->queue = q; + + q->queuedata = osdev; + + osdev->disk = disk; + osdev->q = q; + + /* finally, announce the disk to the world */ + set_capacity(disk, obj_size / 512ULL); + add_disk(disk); + + printk(KERN_INFO "%s: Added of size 0x%llx\n", + disk->disk_name, (unsigned long long)obj_size); + + return 0; +} + +/******************************************************************** + * /sys/class/osdblk/ + * add map OSD object to blkdev + * remove unmap OSD object + * list show mappings + *******************************************************************/ + +static void class_osdblk_release(struct class *cls) +{ + kfree(cls); +} + +static ssize_t class_osdblk_list(struct class *c, char *data) +{ + int n = 0; + struct list_head *tmp; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + list_for_each(tmp, &osdblkdev_list) { + struct osdblk_device *osdev; + + osdev = list_entry(tmp, struct osdblk_device, node); + + n += sprintf(data+n, "%d %d %llu %llu %s\n", + osdev->id, + osdev->major, + osdev->obj.partition, + osdev->obj.id, + osdev->osd_path); + } + + mutex_unlock(&ctl_mutex); + return n; +} + +static ssize_t class_osdblk_add(struct class *c, const char *buf, size_t count) +{ + struct osdblk_device *osdev; + ssize_t rc; + int irc, new_id = 0; + struct list_head *tmp; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + /* new osdblk_device object */ + osdev = kzalloc(sizeof(*osdev) + strlen(buf) + 1, GFP_KERNEL); + if (!osdev) { + rc = -ENOMEM; + goto err_out_mod; + } + + /* static osdblk_device initialization */ + spin_lock_init(&osdev->lock); + INIT_LIST_HEAD(&osdev->node); + + /* generate unique id: find highest unique id, add one */ + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + list_for_each(tmp, &osdblkdev_list) { + struct osdblk_device *osdev; + + osdev = list_entry(tmp, struct osdblk_device, node); + if (osdev->id > new_id) + new_id = osdev->id + 1; + } + + osdev->id = new_id; + + /* add to global list */ + list_add_tail(&osdev->node, &osdblkdev_list); + + mutex_unlock(&ctl_mutex); + + /* parse add command */ + if (sscanf(buf, "%llu %llu %s", &osdev->obj.partition, &osdev->obj.id, + osdev->osd_path) != 3) { + rc = -EINVAL; + goto err_out_slot; + } + + /* initialize rest of new object */ + sprintf(osdev->name, DRV_NAME "%d", osdev->id); + + /* contact requested OSD */ + osdev->osd = osduld_path_lookup(osdev->osd_path); + if (IS_ERR(osdev->osd)) { + rc = PTR_ERR(osdev->osd); + goto err_out_slot; + } + + /* build OSD credential */ + osdblk_make_credential(osdev->obj_cred, &osdev->obj); + + /* register our block device */ + irc = register_blkdev(0, osdev->name); + if (irc < 0) { + rc = irc; + goto err_out_osd; + } + + osdev->major = irc; + + /* set up and announce blkdev mapping */ + rc = osdblk_init_disk(osdev); + if (rc) + goto err_out_blkdev; + + return count; + +err_out_blkdev: + unregister_blkdev(osdev->major, osdev->name); +err_out_osd: + osduld_put_device(osdev->osd); +err_out_slot: + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + list_del_init(&osdev->node); + mutex_unlock(&ctl_mutex); + + kfree(osdev); +err_out_mod: + OSDBLK_DEBUG("Error adding device %s\n", buf); + module_put(THIS_MODULE); + return rc; +} + +static ssize_t class_osdblk_remove(struct class *c, const char *buf, + size_t count) +{ + struct osdblk_device *osdev = NULL; + int target_id, rc; + 
unsigned long ul; + struct list_head *tmp; + + rc = strict_strtoul(buf, 10, &ul); + if (rc) + return rc; + + /* convert to int; abort if we lost anything in the conversion */ + target_id = (int) ul; + if (target_id != ul) + return -EINVAL; + + /* remove object from list immediately */ + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + list_for_each(tmp, &osdblkdev_list) { + osdev = list_entry(tmp, struct osdblk_device, node); + if (osdev->id == target_id) { + list_del_init(&osdev->node); + break; + } + osdev = NULL; + } + + mutex_unlock(&ctl_mutex); + + if (!osdev) + return -ENOENT; + + /* clean up and free blkdev and associated OSD connection */ + osdblk_free_disk(osdev); + unregister_blkdev(osdev->major, osdev->name); + osduld_put_device(osdev->osd); + kfree(osdev); + + /* release module ref */ + module_put(THIS_MODULE); + + return count; +} + +static struct class_attribute class_osdblk_attrs[] = { + __ATTR(add, 0200, NULL, class_osdblk_add), + __ATTR(remove, 0200, NULL, class_osdblk_remove), + __ATTR(list, 0444, class_osdblk_list, NULL), + __ATTR_NULL +}; + +static int osdblk_sysfs_init(void) +{ + int ret = 0; + + /* + * create control files in sysfs + * /sys/class/osdblk/... + */ + class_osdblk = kzalloc(sizeof(*class_osdblk), GFP_KERNEL); + if (!class_osdblk) + return -ENOMEM; + + class_osdblk->name = DRV_NAME; + class_osdblk->owner = THIS_MODULE; + class_osdblk->class_release = class_osdblk_release; + class_osdblk->class_attrs = class_osdblk_attrs; + + ret = class_register(class_osdblk); + if (ret) { + kfree(class_osdblk); + class_osdblk = NULL; + printk(PFX "failed to create class osdblk\n"); + return ret; + } + + return 0; +} + +static void osdblk_sysfs_cleanup(void) +{ + if (class_osdblk) + class_destroy(class_osdblk); + class_osdblk = NULL; +} + +static int __init osdblk_init(void) +{ + int rc; + + rc = osdblk_sysfs_init(); + if (rc) + return rc; + + return 0; +} + +static void __exit osdblk_exit(void) +{ + osdblk_sysfs_cleanup(); +} + +module_init(osdblk_init); +module_exit(osdblk_exit); + diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 83650e00632..99a506f619b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1372,8 +1372,10 @@ try_next_bio: wakeup = (pd->write_congestion_on > 0 && pd->bio_queue_size <= pd->write_congestion_off); spin_unlock(&pd->lock); - if (wakeup) - clear_bdi_congested(&pd->disk->queue->backing_dev_info, WRITE); + if (wakeup) { + clear_bdi_congested(&pd->disk->queue->backing_dev_info, + BLK_RW_ASYNC); + } pkt->sleep_time = max(PACKET_WAIT_TIME, 1); pkt_set_state(pkt, PACKET_WAITING_STATE); @@ -2592,10 +2594,10 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) spin_lock(&pd->lock); if (pd->write_congestion_on > 0 && pd->bio_queue_size >= pd->write_congestion_on) { - set_bdi_congested(&q->backing_dev_info, WRITE); + set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC); do { spin_unlock(&pd->lock); - congestion_wait(WRITE, HZ); + congestion_wait(BLK_RW_ASYNC, HZ); spin_lock(&pd->lock); } while(pd->bio_queue_size > pd->write_congestion_off); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 43db3ea15b5..aa1a3d5a3e2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -213,7 +213,7 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, * Only allow the generic SCSI ioctls if the host can support it. 
*/ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) - return -ENOIOCTLCMD; + return -ENOTTY; return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); } @@ -360,6 +360,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); + /* No need to bounce any requests */ + blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY); + /* No real sector limit. */ blk_queue_max_sectors(vblk->disk->queue, -1U); @@ -424,7 +427,12 @@ static unsigned int features[] = { VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY }; -static struct virtio_driver virtio_blk = { +/* + * virtio_blk causes spurious section mismatch warning by + * simultaneously referring to a __devinit and a __devexit function. + * Use __refdata to avoid this warning. + */ +static struct virtio_driver __refdata virtio_blk = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index f08491a3a81..b20abe102a2 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -390,9 +390,10 @@ static inline void ace_dump_mem(void *base, int len) static void ace_dump_regs(struct ace_device *ace) { - dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" - KERN_INFO " status:%.8x mpu_lba:%.8x busmode:%4x\n" - KERN_INFO " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", + dev_info(ace->dev, + " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" + " status:%.8x mpu_lba:%.8x busmode:%4x\n" + " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD), ace_in(ace, ACE_VERSION), diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 4575171e5be..b2590409f25 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -374,7 +374,7 @@ err: static void __exit z2_exit(void) { int i, j; - blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256); + blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT); unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME); del_gendisk(z2ram_gendisk); put_disk(z2ram_gendisk); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 1df9dda2e37..d5cde6d86f8 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -28,7 +28,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 0bd01f49cfd..6a06913b01d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -1029,10 +1029,6 @@ config CS5535_GPIO If compiled as a module, it will be called cs5535_gpio. 
-config GPIO_VR41XX - tristate "NEC VR4100 series General-purpose I/O Unit support" - depends on CPU_VR41XX - config RAW_DRIVER tristate "RAW driver (/dev/raw/rawN)" depends on BLOCK diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 189efcff08c..66f779ad4f4 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -95,7 +95,6 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o -obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_TB0219) += tb0219.o obj-$(CONFIG_TELCLOCK) += tlclk.o diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 8c9d50db5c3..c5855779058 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -49,6 +49,7 @@ #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 +#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 /* cover 915 and 945 variants */ @@ -81,7 +82,8 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) extern int agp_memory_reserved; @@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) case PCI_DEVICE_ID_INTEL_G41_HB: case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: + case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: *gtt_offset = *gtt_size = MB(2); break; default: @@ -2195,6 +2198,8 @@ static const struct intel_driver_description { "IGDNG/D", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, "IGDNG/M", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, + "IGDNG/MA", NULL, &intel_i965_driver }, { 0, 0, 0, NULL, NULL, NULL } }; @@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_G41_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), + ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), { } }; diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index f4bb43fb801..e077701ae3d 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -225,7 +225,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { .configure = parisc_agp_configure, .fetch_size = parisc_agp_fetch_size, .tlb_flush = parisc_agp_tlbflush, - .mask_memory = parisc_agp_mask_memory, + .mask_memory = parisc_agp_page_mask_memory, .masks = parisc_agp_masks, .agp_enable = parisc_agp_enable, .cache_flush = global_cache_flush, diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 72429b6b2fa..6c32fbf0716 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -81,6 +81,7 @@ static char *serial_version = "4.30"; #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/bitops.h> diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 140ea10ecb8..c02db01f736 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c @@ -27,6 +27,7 @@ #include <linux/cdev.h> #include <linux/list.h> #include <linux/mm.h> 
+#include <asm/pgtable.h> #include <asm/io.h> /* @@ -75,12 +76,13 @@ static struct class *bsr_class; static int bsr_major; enum { - BSR_8 = 0, - BSR_16 = 1, - BSR_64 = 2, - BSR_128 = 3, - BSR_UNKNOWN = 4, - BSR_MAX = 5, + BSR_8 = 0, + BSR_16 = 1, + BSR_64 = 2, + BSR_128 = 3, + BSR_4096 = 4, + BSR_UNKNOWN = 5, + BSR_MAX = 6, }; static unsigned bsr_types[BSR_MAX]; @@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long size = vma->vm_end - vma->vm_start; struct bsr_dev *dev = filp->private_data; + int ret; - if (size > dev->bsr_len || (size & (PAGE_SIZE-1))) - return -EINVAL; - - vma->vm_flags |= (VM_IO | VM_DONTEXPAND); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, - size, vma->vm_page_prot)) + /* check for the case of a small BSR device and map one 4k page for it*/ + if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE) + ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12, + vma->vm_page_prot); + else if (size <= dev->bsr_len) + ret = io_remap_pfn_range(vma, vma->vm_start, + dev->bsr_addr >> PAGE_SHIFT, + size, vma->vm_page_prot); + else + return -EINVAL; + + if (ret) return -EAGAIN; return 0; @@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn) cur->bsr_stride = bsr_stride[i]; cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); + /* if we have a bsr_len of > 4k and less then PAGE_SIZE (64k pages) */ + /* we can only map 4k of it, so only advertise the 4k in sysfs */ + if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) + cur->bsr_len = 4096; + switch(cur->bsr_bytes) { case 8: cur->bsr_type = BSR_8; @@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn) case 128: cur->bsr_type = BSR_128; break; + case 4096: + cur->bsr_type = BSR_4096; + break; default: cur->bsr_type = BSR_UNKNOWN; - printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes); } cur->bsr_num = bsr_types[cur->bsr_type]; diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index f3366d3f06c..2dafc2da064 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -633,6 +633,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> +#include <linux/smp_lock.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> diff --git a/drivers/char/epca.c b/drivers/char/epca.c index abef1f7d84f..ff647ca1c48 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c @@ -36,6 +36,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/uaccess.h> diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 94e7e3c8c05..d97779ef72c 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -552,7 +552,7 @@ static int hvc_chars_in_buffer(struct tty_struct *tty) struct hvc_struct *hp = tty->driver_data; if (!hp) - return -1; + return 0; return hp->n_outbuf; } diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index 5dcbe603eca..91b53eb1c05 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -305,10 +305,11 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw, (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)) == BIOS_CNTL_LOCK_ENABLE_MASK) { static __initdata /*const*/ char warning[] = - KERN_WARNING PFX "Firmware space is locked read-only. 
If you can't or\n" - KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n" - KERN_WARNING PFX "you are certain that your system has a functional\n" - KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n"; + KERN_WARNING +PFX "Firmware space is locked read-only. If you can't or\n" +PFX "don't want to disable this in firmware setup, and if\n" +PFX "you are certain that your system has a functional\n" +PFX "RNG, try using the 'no_fwh_detect' option.\n"; if (no_fwh_detect) return -ENODEV; diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 4159292e35c..4f1f4cd670d 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c @@ -122,6 +122,7 @@ #include <linux/fs.h> #include <linux/sched.h> #include <linux/serial.h> +#include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/timer.h> @@ -1478,10 +1479,10 @@ static int __devinit load_firmware(struct pci_dev *pdev, status = inw(base + 0x4); if (status != 0) { dev_warn(&pdev->dev, "Card%d rejected load header:\n" - KERN_WARNING "Address:0x%x\n" - KERN_WARNING "Count:0x%x\n" - KERN_WARNING "Status:0x%x\n", - index + 1, frame->addr, frame->count, status); + "Address:0x%x\n" + "Count:0x%x\n" + "Status:0x%x\n", + index + 1, frame->addr, frame->count, status); goto errrelfw; } outsw(base, frame->data, word_count); @@ -1526,10 +1527,10 @@ static int __devinit load_firmware(struct pci_dev *pdev, status = inw(base + 0x4); if (status != 0) { dev_warn(&pdev->dev, "Card%d rejected verify header:\n" - KERN_WARNING "Address:0x%x\n" - KERN_WARNING "Count:0x%x\n" - KERN_WARNING "Status: 0x%x\n", - index + 1, frame->addr, frame->count, status); + "Address:0x%x\n" + "Count:0x%x\n" + "Status: 0x%x\n", + index + 1, frame->addr, frame->count, status); goto errrelfw; } diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 0c999f5bb3d..ab2f3349c5c 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 65b6ff2442c..dd0083bbb64 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c @@ -34,6 +34,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> +#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 52d953eb30c..dbf8d52f31d 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -23,6 +23,7 @@ #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/smp_lock.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c index 1c43c8cdee2..c68118efad8 100644 --- a/drivers/char/n_hdlc.c +++ b/drivers/char/n_hdlc.c @@ -97,6 +97,7 @@ #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> +#include <linux/smp_lock.h> #include <linux/string.h> /* used in new tty drivers */ #include <linux/signal.h> /* used in new tty drivers */ #include <linux/if.h> diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c index 2e99158ebb8..6934025a1ac 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/char/n_r3964.c @@ -58,6 +58,7 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/tty.h> #include 
<linux/errno.h> #include <linux/string.h> /* used in new tty drivers */ diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 94a5d5020ab..4e28b35024e 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space) if (space < 2) return -1; tty->canon_column = tty->column = 0; - tty_put_char(tty, '\r'); - tty_put_char(tty, c); + tty->ops->write(tty, "\r\n", 2); return 2; } tty->canon_column = tty->column; @@ -1331,9 +1330,6 @@ handle_newline: static void n_tty_write_wakeup(struct tty_struct *tty) { - /* Write out any echoed characters that are still pending */ - process_echoes(tty); - if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) kill_fasync(&tty->fasync, SIGIO, POLL_OUT); } @@ -1586,6 +1582,7 @@ static int n_tty_open(struct tty_struct *tty) static inline int input_available_p(struct tty_struct *tty, int amt) { + tty_flush_to_ldisc(tty); if (tty->icanon) { if (tty->canon_data) return 1; diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index 574f1c79b6e..ec58d8c387f 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c @@ -828,7 +828,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) struct port *port = &dc->port[index]; void __iomem *addr = port->dl_addr[port->toggle_dl]; struct tty_struct *tty = tty_port_tty_get(&port->port); - int i; + int i, ret; if (unlikely(!tty)) { DBG1("tty not open for port: %d?", index); @@ -844,12 +844,14 @@ static int receive_data(enum port_type index, struct nozomi *dc) /* disable interrupt in downlink... */ disable_transmit_dl(index, dc); - return 0; + ret = 0; + goto put; } if (unlikely(size == 0)) { dev_err(&dc->pdev->dev, "size == 0?\n"); - return 1; + ret = 1; + goto put; } tty_buffer_request_room(tty, size); @@ -871,8 +873,10 @@ static int receive_data(enum port_type index, struct nozomi *dc) } set_bit(index, &dc->flip); + ret = 1; +put: tty_kref_put(tty); - return 1; + return ret; } /* Debug for interrupts */ @@ -1862,16 +1866,14 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty) { struct port *port = tty->driver_data; struct nozomi *dc = get_dc_by_tty(tty); - s32 rval; + s32 rval = 0; if (unlikely(!dc || !port)) { - rval = -ENODEV; goto exit_in_buffer; } if (unlikely(!port->port.count)) { dev_err(&dc->pdev->dev, "No tty open?\n"); - rval = -ENODEV; goto exit_in_buffer; } diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c index 569f2f7743a..674b3ab3587 100644 --- a/drivers/char/pcmcia/ipwireless/tty.c +++ b/drivers/char/pcmcia/ipwireless/tty.c @@ -320,10 +320,10 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty) struct ipw_tty *tty = linux_tty->driver_data; if (!tty) - return -ENODEV; + return 0; if (!tty->open_count) - return -EINVAL; + return 0; return tty->tx_bytes_queued; } diff --git a/drivers/char/pty.c b/drivers/char/pty.c index daebe1ba43d..b33d6688e91 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -22,6 +22,7 @@ #include <linux/major.h> #include <linux/mm.h> #include <linux/init.h> +#include <linux/smp_lock.h> #include <linux/sysctl.h> #include <linux/device.h> #include <linux/uaccess.h> @@ -75,114 +76,82 @@ static void pty_close(struct tty_struct *tty, struct file *filp) */ static void pty_unthrottle(struct tty_struct *tty) { - struct tty_struct *o_tty = tty->link; - - if (!o_tty) - return; - - tty_wakeup(o_tty); + tty_wakeup(tty->link); set_bit(TTY_THROTTLED, &tty->flags); } -/* - * WSH 05/24/97: modified 
to - * (1) use space in tty->flip instead of a shared temp buffer - * The flip buffers aren't being used for a pty, so there's lots - * of space available. The buffer is protected by a per-pty - * semaphore that should almost never come under contention. - * (2) avoid redundant copying for cases where count >> receive_room - * N.B. Calls from user space may now return an error code instead of - * a count. +/** + * pty_space - report space left for writing + * @to: tty we are writing into * - * FIXME: Our pty_write method is called with our ldisc lock held but - * not our partners. We can't just wait on the other one blindly without - * risking deadlocks. At some point when everything has settled down we need - * to look into making pty_write at least able to sleep over an ldisc change. + * The tty buffers allow 64K but we sneak a peak and clip at 8K this + * allows a lot of overspill room for echo and other fun messes to + * be handled properly + */ + +static int pty_space(struct tty_struct *to) +{ + int n = 8192 - to->buf.memory_used; + if (n < 0) + return 0; + return n; +} + +/** + * pty_write - write to a pty + * @tty: the tty we write from + * @buf: kernel buffer of data + * @count: bytes to write * - * The return on no ldisc is a bit counter intuitive but the logic works - * like this. During an ldisc change the other end will flush its buffers. We - * thus return the full length which is identical to the case where we had - * proper locking and happened to queue the bytes just before the flush during - * the ldisc change. + * Our "hardware" write method. Data is coming from the ldisc which + * may be in a non sleeping state. We simply throw this at the other + * end of the link as if we were an IRQ handler receiving stuff for + * the other side of the pty/tty pair. */ -static int pty_write(struct tty_struct *tty, const unsigned char *buf, - int count) + +static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; - struct tty_ldisc *ld; - int c = count; - if (!to || tty->stopped) + if (tty->stopped) return 0; - ld = tty_ldisc_ref(to); - - if (ld) { - c = to->receive_room; - if (c > count) - c = count; - ld->ops->receive_buf(to, buf, NULL, c); - tty_ldisc_deref(ld); + + if (c > 0) { + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to, buf, c); + /* And shovel */ + tty_flip_buffer_push(to); + tty_wakeup(tty); } return c; } +/** + * pty_write_room - write space + * @tty: tty we are writing from + * + * Report how many bytes the ldisc can send into the queue for + * the other device. + */ + static int pty_write_room(struct tty_struct *tty) { - struct tty_struct *to = tty->link; - - if (!to || tty->stopped) + if (tty->stopped) return 0; - - return to->receive_room; + return pty_space(tty->link); } -/* - * WSH 05/24/97: Modified for asymmetric MASTER/SLAVE behavior - * The chars_in_buffer() value is used by the ldisc select() function - * to hold off writing when chars_in_buffer > WAKEUP_CHARS (== 256). - * The pty driver chars_in_buffer() Master/Slave must behave differently: - * - * The Master side needs to allow typed-ahead commands to accumulate - * while being canonicalized, so we report "our buffer" as empty until - * some threshold is reached, and then report the count. (Any count > - * WAKEUP_CHARS is regarded by select() as "full".) To avoid deadlock - * the count returned must be 0 if no canonical data is available to be - * read. (The N_TTY ldisc.chars_in_buffer now knows this.) 
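The rewritten pty_write() no longer takes an ldisc reference on the partner; it inserts the bytes into the other end's flip buffer and pushes it, while pty_space() advertises write room clipped to 8K of the 64K the tty buffers would allow. A standalone sketch of that clamp, with example usage figures:

/*
 * Sketch of the pty_space() clamp: report what is left of an 8K budget,
 * never a negative number. The sample memory_used values are illustrative.
 */
#include <stdio.h>

static int pty_space_sketch(int memory_used)
{
        int n = 8192 - memory_used;

        return n < 0 ? 0 : n;
}

int main(void)
{
        printf("%d\n", pty_space_sketch(1000));         /* 7192 */
        printf("%d\n", pty_space_sketch(70000));        /* 0: over budget */
        return 0;
}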
+/** + * pty_chars_in_buffer - characters currently in our tx queue + * @tty: our tty * - * The Slave side passes all characters in raw mode to the Master side's - * buffer where they can be read immediately, so in this case we can - * return the true count in the buffer. + * Report how much we have in the transmit queue. As everything is + * instantly at the other end this is easy to implement. */ + static int pty_chars_in_buffer(struct tty_struct *tty) { - struct tty_struct *to = tty->link; - struct tty_ldisc *ld; - int count = 0; - - /* We should get the line discipline lock for "tty->link" */ - if (!to) - return 0; - /* We cannot take a sleeping reference here without deadlocking with - an ldisc change - but it doesn't really matter */ - ld = tty_ldisc_ref(to); - if (ld == NULL) - return 0; - - /* The ldisc must report 0 if no characters available to be read */ - if (ld->ops->chars_in_buffer) - count = ld->ops->chars_in_buffer(to); - - tty_ldisc_deref(ld); - - if (tty->driver->subtype == PTY_TYPE_SLAVE) - return count; - - /* Master side driver ... if the other side's read buffer is less than - * half full, return 0 to allow writers to proceed; otherwise return - * the count. This leaves a comfortable margin to avoid overflow, - * and still allows half a buffer's worth of typed-ahead commands. - */ - return (count < N_TTY_BUF_SIZE/2) ? 0 : count; + return 0; } /* Set the lock flag on a pty */ @@ -202,20 +171,10 @@ static void pty_flush_buffer(struct tty_struct *tty) { struct tty_struct *to = tty->link; unsigned long flags; - struct tty_ldisc *ld; if (!to) return; - ld = tty_ldisc_ref(to); - - /* The other end is changing discipline */ - if (!ld) - return; - - if (ld->ops->flush_buffer) - to->ldisc->ops->flush_buffer(to); - tty_ldisc_deref(ld); - + /* tty_buffer_flush(to); FIXME */ if (to->packet) { spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status |= TIOCPKT_FLUSHWRITE; diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c index ce81da5b2da..d58c2eb07f0 100644 --- a/drivers/char/rio/rio_linux.c +++ b/drivers/char/rio/rio_linux.c @@ -44,6 +44,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/miscdevice.h> #include <linux/init.h> diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 21766045123..171711acf5c 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c @@ -47,6 +47,7 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/tty_flip.h> +#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/device.h> diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 63d5b628477..0e29a23ec4c 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c @@ -73,6 +73,7 @@ #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/serial.h> +#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index f1f24f0ee26..51e7a46787b 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c @@ -52,6 +52,7 @@ #include <linux/interrupt.h> #include <linux/serial.h> #include <linux/serialP.h> +#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index e72be4190a4..268e17f9ec3 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c @@ -87,6 +87,7 @@ #include <linux/tty_flip.h> 
#include <linux/mm.h> #include <linux/serial.h> +#include <linux/smp_lock.h> #include <linux/fcntl.h> #include <linux/major.h> #include <linux/delay.h> @@ -1808,10 +1809,10 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file, if (clear & TIOCM_DTR) port->MSVR &= ~MSVR_DTR; } - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_CAR, port_No(port)); sx_out(bp, CD186x_MSVR, port->MSVR); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); spin_unlock_irqrestore(&port->lock, flags); func_exit(); return 0; @@ -1832,11 +1833,11 @@ static int sx_send_break(struct tty_struct *tty, int length) port->break_length = SPECIALIX_TPS / HZ * length; port->COR2 |= COR2_ETC; port->IER |= IER_TXRDY; - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_CAR, port_No(port)); sx_out(bp, CD186x_COR2, port->COR2); sx_out(bp, CD186x_IER, port->IER); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); spin_unlock_irqrestore(&port->lock, flags); sx_wait_CCR(bp); spin_lock_irqsave(&bp->lock, flags); @@ -2022,9 +2023,9 @@ static void sx_unthrottle(struct tty_struct *tty) if (sx_crtscts(tty)) port->MSVR |= MSVR_DTR; /* Else clause: see remark in "sx_throttle"... */ - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_CAR, port_No(port)); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); if (I_IXOFF(tty)) { spin_unlock_irqrestore(&port->lock, flags); sx_wait_CCR(bp); @@ -2034,9 +2035,9 @@ static void sx_unthrottle(struct tty_struct *tty) sx_wait_CCR(bp); spin_lock_irqsave(&port->lock, flags); } - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_MSVR, port->MSVR); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); spin_unlock_irqrestore(&port->lock, flags); func_exit(); @@ -2060,10 +2061,10 @@ static void sx_stop(struct tty_struct *tty) spin_lock_irqsave(&port->lock, flags); port->IER &= ~IER_TXRDY; - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_CAR, port_No(port)); sx_out(bp, CD186x_IER, port->IER); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); spin_unlock_irqrestore(&port->lock, flags); func_exit(); @@ -2088,10 +2089,10 @@ static void sx_start(struct tty_struct *tty) spin_lock_irqsave(&port->lock, flags); if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { port->IER |= IER_TXRDY; - spin_lock_irqsave(&bp->lock, flags); + spin_lock(&bp->lock); sx_out(bp, CD186x_CAR, port_No(port)); sx_out(bp, CD186x_IER, port->IER); - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock(&bp->lock); } spin_unlock_irqrestore(&port->lock, flags); diff --git a/drivers/char/sx.c b/drivers/char/sx.c index 518f2a25d91..a81ec4fcf6f 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c @@ -216,6 +216,7 @@ #include <linux/eisa.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/bitops.h> diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index afded3a2379..813552f1488 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -81,6 +81,7 @@ #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index a2e67e6df3a..91f20a92fdd 100644 --- 
a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -62,6 +62,7 @@ #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 6f727e3c53a..8d4a2a8a0a7 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c @@ -52,6 +52,7 @@ #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 0db35857e4d..5d7a02f63e1 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -35,7 +35,6 @@ #include <linux/spinlock.h> #include <linux/vt_kern.h> #include <linux/workqueue.h> -#include <linux/kexec.h> #include <linux/hrtimer.h> #include <linux/oom.h> @@ -124,9 +123,12 @@ static struct sysrq_key_op sysrq_unraw_op = { static void sysrq_handle_crash(int key, struct tty_struct *tty) { char *killer = NULL; + + panic_on_oops = 1; /* force panic */ + wmb(); *killer = 1; } -static struct sysrq_key_op sysrq_crashdump_op = { +static struct sysrq_key_op sysrq_crash_op = { .handler = sysrq_handle_crash, .help_msg = "Crash", .action_msg = "Trigger a crash", @@ -401,7 +403,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { */ NULL, /* a */ &sysrq_reboot_op, /* b */ - &sysrq_crashdump_op, /* c & ibm_emac driver debug */ + &sysrq_crash_op, /* c & ibm_emac driver debug */ &sysrq_showlocks_op, /* d */ &sysrq_term_op, /* e */ &sysrq_moom_op, /* f */ diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c index 6062b62800f..b3ec9b10e29 100644 --- a/drivers/char/tb0219.c +++ b/drivers/char/tb0219.c @@ -1,7 +1,7 @@ /* * Driver for TANBAC TB0219 base board. * - * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,7 +28,7 @@ #include <asm/vr41xx/giu.h> #include <asm/vr41xx/tb0219.h> -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_DESCRIPTION("TANBAC TB0219 base board driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index ccdd828adce..b0603b2e568 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -26,7 +26,6 @@ #include <linux/poll.h> #include <linux/mutex.h> #include <linux/spinlock.h> -#include <linux/smp_lock.h> #include "tpm.h" diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 810ee25d66a..3108991c5c8 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -462,6 +462,19 @@ static void flush_to_ldisc(struct work_struct *work) } /** + * tty_flush_to_ldisc + * @tty: tty to push + * + * Push the terminal flip buffers to the line discipline. + * + * Must not be called from IRQ context. 
+ */ +void tty_flush_to_ldisc(struct tty_struct *tty) +{ + flush_to_ldisc(&tty->buf.work.work); +} + +/** * tty_flip_buffer_push - terminal * @tty: tty to push * diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c index b24f6c6a1ea..ad6ba4ed280 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/char/tty_ioctl.c @@ -21,7 +21,6 @@ #include <linux/module.h> #include <linux/bitops.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index a19e935847b..e48af9f7921 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -21,7 +21,6 @@ #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/bitops.h> @@ -49,6 +48,41 @@ static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); /* Line disc dispatch table */ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; +static inline struct tty_ldisc *get_ldisc(struct tty_ldisc *ld) +{ + if (ld) + atomic_inc(&ld->users); + return ld; +} + +static void put_ldisc(struct tty_ldisc *ld) +{ + unsigned long flags; + + if (WARN_ON_ONCE(!ld)) + return; + + /* + * If this is the last user, free the ldisc, and + * release the ldisc ops. + * + * We really want an "atomic_dec_and_lock_irqsave()", + * but we don't have it, so this does it by hand. + */ + local_irq_save(flags); + if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { + struct tty_ldisc_ops *ldo = ld->ops; + + ldo->refcount--; + module_put(ldo->owner); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + + kfree(ld); + return; + } + local_irq_restore(flags); +} + /** * tty_register_ldisc - install a line discipline * @disc: ldisc number @@ -143,7 +177,7 @@ static struct tty_ldisc *tty_ldisc_try_get(int disc) /* lock it */ ldops->refcount++; ld->ops = ldops; - ld->refcount = 0; + atomic_set(&ld->users, 1); err = 0; } } @@ -182,35 +216,6 @@ static struct tty_ldisc *tty_ldisc_get(int disc) return ld; } -/** - * tty_ldisc_put - drop ldisc reference - * @ld: ldisc - * - * Drop a reference to a line discipline. Manage refcounts and - * module usage counts. Free the ldisc once the recount hits zero. - * - * Locking: - * takes tty_ldisc_lock to guard against ldisc races - */ - -static void tty_ldisc_put(struct tty_ldisc *ld) -{ - unsigned long flags; - int disc = ld->ops->num; - struct tty_ldisc_ops *ldo; - - BUG_ON(disc < N_TTY || disc >= NR_LDISCS); - - spin_lock_irqsave(&tty_ldisc_lock, flags); - ldo = tty_ldiscs[disc]; - BUG_ON(ldo->refcount == 0); - ldo->refcount--; - module_put(ldo->owner); - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - WARN_ON(ld->refcount); - kfree(ld); -} - static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) { return (*pos < NR_LDISCS) ? pos : NULL; @@ -235,7 +240,7 @@ static int tty_ldiscs_seq_show(struct seq_file *m, void *v) if (IS_ERR(ld)) return 0; seq_printf(m, "%-10s %2d\n", ld->ops->name ? 
ld->ops->name : "???", i); - tty_ldisc_put(ld); + put_ldisc(ld); return 0; } @@ -289,20 +294,17 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) * Locking: takes tty_ldisc_lock */ -static int tty_ldisc_try(struct tty_struct *tty) +static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty) { unsigned long flags; struct tty_ldisc *ld; - int ret = 0; spin_lock_irqsave(&tty_ldisc_lock, flags); - ld = tty->ldisc; - if (test_bit(TTY_LDISC, &tty->flags)) { - ld->refcount++; - ret = 1; - } + ld = NULL; + if (test_bit(TTY_LDISC, &tty->flags)) + ld = get_ldisc(tty->ldisc); spin_unlock_irqrestore(&tty_ldisc_lock, flags); - return ret; + return ld; } /** @@ -323,10 +325,11 @@ static int tty_ldisc_try(struct tty_struct *tty) struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) { + struct tty_ldisc *ld; + /* wait_event is a macro */ - wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); - WARN_ON(tty->ldisc->refcount == 0); - return tty->ldisc; + wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL); + return ld; } EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); @@ -343,9 +346,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) { - if (tty_ldisc_try(tty)) - return tty->ldisc; - return NULL; + return tty_ldisc_try(tty); } EXPORT_SYMBOL_GPL(tty_ldisc_ref); @@ -361,21 +362,15 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref); void tty_ldisc_deref(struct tty_ldisc *ld) { - unsigned long flags; - - BUG_ON(ld == NULL); - - spin_lock_irqsave(&tty_ldisc_lock, flags); - if (ld->refcount == 0) - printk(KERN_ERR "tty_ldisc_deref: no references.\n"); - else - ld->refcount--; - if (ld->refcount == 0) - wake_up(&tty_ldisc_wait); - spin_unlock_irqrestore(&tty_ldisc_lock, flags); + put_ldisc(ld); } EXPORT_SYMBOL_GPL(tty_ldisc_deref); +static inline void tty_ldisc_put(struct tty_ldisc *ld) +{ + put_ldisc(ld); +} + /** * tty_ldisc_enable - allow ldisc use * @tty: terminal to activate ldisc on @@ -513,8 +508,9 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) * be obtained while the delayed work queue halt ensures that no more * data is fed to the ldisc. * - * In order to wait for any existing references to complete see - * tty_ldisc_wait_idle. + * You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex) + * in order to make sure any currently executing ldisc work is also + * flushed. */ static int tty_ldisc_halt(struct tty_struct *tty) @@ -524,31 +520,6 @@ static int tty_ldisc_halt(struct tty_struct *tty) } /** - * tty_ldisc_wait_idle - wait for the ldisc to become idle - * @tty: tty to wait for - * - * Wait for the line discipline to become idle. The discipline must - * have been halted for this to guarantee it remains idle. - * - * tty_ldisc_lock protects the ref counts currently. 
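get_ldisc()/put_ldisc() above move the line-discipline reference count into the ldisc itself; the final put frees the ldisc and drops the ops module reference under tty_ldisc_lock via atomic_dec_and_lock(). As a rough userspace analogue of the free-on-last-put shape only (C11 atomics, no equivalent of the kernel lock or module counting):

/*
 * Userspace analogue of the get_ldisc()/put_ldisc() counting: the object
 * is freed by whichever caller drops the last reference. This omits the
 * tty_ldisc_lock interaction entirely.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int users;
};

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->users, 1);
        return o;
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->users, 1) == 1) {
                /* we were the last user */
                printf("freeing object\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->users, 1);      /* creation reference */
        obj_get(o);
        obj_put(o);     /* one reference still held */
        obj_put(o);     /* last put frees */
        return 0;
}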
- */ - -static int tty_ldisc_wait_idle(struct tty_struct *tty) -{ - unsigned long flags; - spin_lock_irqsave(&tty_ldisc_lock, flags); - while (tty->ldisc->refcount) { - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - if (wait_event_timeout(tty_ldisc_wait, - tty->ldisc->refcount == 0, 5 * HZ) == 0) - return -EBUSY; - spin_lock_irqsave(&tty_ldisc_lock, flags); - } - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - return 0; -} - -/** * tty_set_ldisc - set line discipline * @tty: the terminal to set * @ldisc: the line discipline @@ -643,14 +614,6 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) flush_scheduled_work(); - /* Let any existing reference holders finish */ - retval = tty_ldisc_wait_idle(tty); - if (retval < 0) { - clear_bit(TTY_LDISC_CHANGING, &tty->flags); - tty_ldisc_put(new_ldisc); - return retval; - } - mutex_lock(&tty->ldisc_mutex); if (test_bit(TTY_HUPPED, &tty->flags)) { /* We were raced by the hangup method. It will have stomped @@ -791,17 +754,22 @@ void tty_ldisc_hangup(struct tty_struct *tty) * N_TTY. */ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { - /* Avoid racing set_ldisc */ - mutex_lock(&tty->ldisc_mutex); - /* Switch back to N_TTY */ + /* Make sure the old ldisc is quiescent */ tty_ldisc_halt(tty); - tty_ldisc_wait_idle(tty); - tty_ldisc_reinit(tty); - /* At this point we have a closed ldisc and we want to - reopen it. We could defer this to the next open but - it means auditing a lot of other paths so this is a FIXME */ - WARN_ON(tty_ldisc_open(tty, tty->ldisc)); - tty_ldisc_enable(tty); + flush_scheduled_work(); + + /* Avoid racing set_ldisc or tty_ldisc_release */ + mutex_lock(&tty->ldisc_mutex); + if (tty->ldisc) { /* Not yet closed */ + /* Switch back to N_TTY */ + tty_ldisc_reinit(tty); + /* At this point we have a closed ldisc and we want to + reopen it. We could defer this to the next open but + it means auditing a lot of other paths so this is + a FIXME */ + WARN_ON(tty_ldisc_open(tty, tty->ldisc)); + tty_ldisc_enable(tty); + } mutex_unlock(&tty->ldisc_mutex); tty_reset_termios(tty); } @@ -858,24 +826,25 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) tty_ldisc_halt(tty); flush_scheduled_work(); + mutex_lock(&tty->ldisc_mutex); /* - * Wait for any short term users (we know they are just driver - * side waiters as the file is closing so user count on the file - * side is zero. + * Now kill off the ldisc */ + tty_ldisc_close(tty, tty->ldisc); + tty_ldisc_put(tty->ldisc); + /* Force an oops if we mess this up */ + tty->ldisc = NULL; - tty_ldisc_wait_idle(tty); - - /* - * Shutdown the current line discipline, and reset it to N_TTY. 
- * - * FIXME: this MUST get fixed for the new reflocking - */ + /* Ensure the next open requests the N_TTY ldisc */ + tty_set_termios_ldisc(tty, N_TTY); + mutex_unlock(&tty->ldisc_mutex); - tty_ldisc_reinit(tty); /* This will need doing differently if we need to lock */ if (o_tty) tty_ldisc_release(o_tty, NULL); + + /* And the memory resources remaining (buffers, termios) will be + disposed of when the kref hits zero */ } /** diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 4e862a75f7f..9769b1149f7 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -267,7 +267,7 @@ int tty_port_block_til_ready(struct tty_port *port, if (retval == 0) port->flags |= ASYNC_NORMAL_ACTIVE; spin_unlock_irqrestore(&port->lock, flags); - return 0; + return retval; } EXPORT_SYMBOL(tty_port_block_til_ready); diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c index d94d25c12aa..c1791a63d99 100644 --- a/drivers/char/vc_screen.c +++ b/drivers/char/vc_screen.c @@ -495,11 +495,15 @@ void vcs_remove_sysfs(int index) int __init vcs_init(void) { + unsigned int i; + if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops)) panic("unable to get major %d for vcs device", VCS_MAJOR); vc_class = class_create(THIS_MODULE, "vc"); device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs"); device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa"); + for (i = 0; i < MIN_NR_CONSOLES; i++) + vcs_make_sysfs(i); return 0; } diff --git a/drivers/char/vt.c b/drivers/char/vt.c index d9113b4c76e..404f4c1ee43 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -89,6 +89,7 @@ #include <linux/mutex.h> #include <linux/vt_kern.h> #include <linux/selection.h> +#include <linux/smp_lock.h> #include <linux/tiocl.h> #include <linux/kbd_kern.h> #include <linux/consolemap.h> @@ -769,14 +770,12 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ visual_init(vc, currcons, 1); if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); - if (!vc->vc_kmalloced) - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); + vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) { kfree(vc); vc_cons[currcons].d = NULL; return -ENOMEM; } - vc->vc_kmalloced = 1; vc_init(vc, vc->vc_rows, vc->vc_cols, 1); vcs_make_sysfs(currcons); atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, ¶m); @@ -912,10 +911,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - if (vc->vc_kmalloced) - kfree(vc->vc_screenbuf); + kfree(vc->vc_screenbuf); vc->vc_screenbuf = newscreen; - vc->vc_kmalloced = 1; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); @@ -994,8 +991,7 @@ void vc_deallocate(unsigned int currcons) vc->vc_sw->con_deinit(vc); put_pid(vc->vt_pid); module_put(vc->vc_sw->owner); - if (vc->vc_kmalloced) - kfree(vc->vc_screenbuf); + kfree(vc->vc_screenbuf); if (currcons >= MIN_NR_CONSOLES) kfree(vc); vc_cons[currcons].d = NULL; @@ -2880,7 +2876,6 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); visual_init(vc, currcons, 1); vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); - vc->vc_kmalloced = 0; vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); } diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 7539bed0f7e..95189f288f8 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -25,6 +25,7 @@ #include 
<linux/console.h> #include <linux/consolemap.h> #include <linux/signal.h> +#include <linux/smp_lock.h> #include <linux/timex.h> #include <asm/io.h> diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 2964f5f4a7e..6b3e0c2f33e 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -40,6 +40,7 @@ struct sh_cmt_priv { struct platform_device *pdev; unsigned long flags; + unsigned long flags_suspend; unsigned long match_value; unsigned long next_match_value; unsigned long max_match_value; @@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev) return -EBUSY; /* cannot unregister clockevent and clocksource */ } +static int sh_cmt_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_cmt_priv *p = platform_get_drvdata(pdev); + + /* save flag state and stop CMT channel */ + p->flags_suspend = p->flags; + sh_cmt_stop(p, p->flags); + return 0; +} + +static int sh_cmt_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_cmt_priv *p = platform_get_drvdata(pdev); + + /* start CMT channel from saved state */ + sh_cmt_start(p, p->flags_suspend); + return 0; +} + +static struct dev_pm_ops sh_cmt_dev_pm_ops = { + .suspend = sh_cmt_suspend, + .resume = sh_cmt_resume, +}; + static struct platform_driver sh_cmt_device_driver = { .probe = sh_cmt_probe, .remove = __devexit_p(sh_cmt_remove), .driver = { .name = "sh_cmt", + .pm = &sh_cmt_dev_pm_ops, } }; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 9ffb05f4095..93c2322feab 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, if (periodic) sh_tmu_write(p, TCOR, delta); else - sh_tmu_write(p, TCOR, 0); + sh_tmu_write(p, TCOR, 0xffffffff); sh_tmu_write(p, TCNT, delta); diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index c769ef269fb..408c2af25d5 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c @@ -1,7 +1,7 @@ /* * cn_queue.c * - * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index fd336c5a905..08b2500f21e 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -1,7 +1,7 @@ /* * connector.c * - * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -33,7 +33,7 @@ #include <net/sock.h> MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); static u32 cn_idx = CN_IDX_CONNECTOR; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6e2ec0b1894..2968ed6a9c4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = { * cpufreq_add_dev - add a CPU device * * Adds the cpufreq interface for a CPU device. 
+ * + * The Oracle says: try running cpufreq registration/unregistration concurrently + * with with cpu hotplugging and all hell will break loose. Tried to clean this + * mess up, but more thorough testing is needed. - Mathieu */ static int cpufreq_add_dev(struct sys_device *sys_dev) { @@ -772,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) struct sys_device *cpu_sys_dev; unsigned long flags; unsigned int j; -#ifdef CONFIG_SMP - struct cpufreq_policy *managed_policy; -#endif if (cpu_is_offline(cpu)) return 0; @@ -804,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) goto nomem_out; } if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) { - kfree(policy); ret = -ENOMEM; - goto nomem_out; + goto err_free_policy; } if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { - free_cpumask_var(policy->cpus); - kfree(policy); ret = -ENOMEM; - goto nomem_out; + goto err_free_cpumask; } policy->cpu = cpu; @@ -820,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) /* Initially set CPU itself as the policy_cpu */ per_cpu(policy_cpu, cpu) = cpu; - lock_policy_rwsem_write(cpu); + ret = (lock_policy_rwsem_write(cpu) < 0); + WARN_ON(ret); init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); @@ -833,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ret = cpufreq_driver->init(policy); if (ret) { dprintk("initialization failed\n"); - goto err_out; + goto err_unlock_policy; } policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; @@ -852,21 +851,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) #endif for_each_cpu(j, policy->cpus) { + struct cpufreq_policy *managed_policy; + if (cpu == j) continue; /* Check for existing affected CPUs. * They may not be aware of it due to CPU Hotplug. + * cpufreq_cpu_put is called when the device is removed + * in __cpufreq_remove_dev() */ - managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */ + managed_policy = cpufreq_cpu_get(j); if (unlikely(managed_policy)) { /* Set proper policy_cpu */ unlock_policy_rwsem_write(cpu); per_cpu(policy_cpu, cpu) = managed_policy->cpu; - if (lock_policy_rwsem_write(cpu) < 0) - goto err_out_driver_exit; + if (lock_policy_rwsem_write(cpu) < 0) { + /* Should not go through policy unlock path */ + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); + ret = -EBUSY; + cpufreq_cpu_put(managed_policy); + goto err_free_cpumask; + } spin_lock_irqsave(&cpufreq_driver_lock, flags); cpumask_copy(managed_policy->cpus, policy->cpus); @@ -878,11 +887,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) &managed_policy->kobj, "cpufreq"); if (ret) - goto err_out_driver_exit; - - cpufreq_debug_enable_ratelimit(); - ret = 0; - goto err_out_driver_exit; /* call driver->exit() */ + cpufreq_cpu_put(managed_policy); + /* + * Success. We only needed to be added to the mask. + * Call driver->exit() because only the cpu parent of + * the kobj needed to call init(). 
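+ * The managed_policy reference taken by cpufreq_cpu_get()
+ * above is kept on this path; it is dropped later by
+ * __cpufreq_remove_dev() when the device is removed.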
+ */ + goto out_driver_exit; /* call driver->exit() */ } } #endif @@ -892,29 +903,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj, "cpufreq"); if (ret) - goto err_out_driver_exit; + goto out_driver_exit; /* set up files for this cpu device */ drv_attr = cpufreq_driver->attr; while ((drv_attr) && (*drv_attr)) { ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); if (ret) - goto err_out_driver_exit; + goto err_out_kobj_put; drv_attr++; } if (cpufreq_driver->get) { ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); if (ret) - goto err_out_driver_exit; + goto err_out_kobj_put; } if (cpufreq_driver->target) { ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) - goto err_out_driver_exit; + goto err_out_kobj_put; } spin_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) { + if (!cpu_online(j)) + continue; per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(policy_cpu, j) = policy->cpu; } @@ -922,18 +935,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) /* symlink affected CPUs */ for_each_cpu(j, policy->cpus) { + struct cpufreq_policy *managed_policy; + if (j == cpu) continue; if (!cpu_online(j)) continue; dprintk("CPU %u already managed, adding link\n", j); - cpufreq_cpu_get(cpu); + managed_policy = cpufreq_cpu_get(cpu); cpu_sys_dev = get_cpu_sysdev(j); ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, "cpufreq"); - if (ret) + if (ret) { + cpufreq_cpu_put(managed_policy); goto err_out_unregister; + } } policy->governor = NULL; /* to assure that the starting sequence is @@ -965,17 +982,20 @@ err_out_unregister: per_cpu(cpufreq_cpu_data, j) = NULL; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); +err_out_kobj_put: kobject_put(&policy->kobj); wait_for_completion(&policy->kobj_unregister); -err_out_driver_exit: +out_driver_exit: if (cpufreq_driver->exit) cpufreq_driver->exit(policy); -err_out: +err_unlock_policy: unlock_policy_rwsem_write(cpu); +err_free_cpumask: + free_cpumask_var(policy->cpus); +err_free_policy: kfree(policy); - nomem_out: module_put(cpufreq_driver->owner); module_out: @@ -1070,8 +1090,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) spin_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif - unlock_policy_rwsem_write(cpu); - if (cpufreq_driver->target) __cpufreq_governor(data, CPUFREQ_GOV_STOP); @@ -1088,6 +1106,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) if (cpufreq_driver->exit) cpufreq_driver->exit(data); + unlock_policy_rwsem_write(cpu); + free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); @@ -1228,9 +1248,9 @@ EXPORT_SYMBOL(cpufreq_get); static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) { - int cpu = sysdev->id; int ret = 0; - unsigned int cur_freq = 0; + + int cpu = sysdev->id; struct cpufreq_policy *cpu_policy; dprintk("suspending cpu %u\n", cpu); @@ -1253,42 +1273,9 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) if (cpufreq_driver->suspend) { ret = cpufreq_driver->suspend(cpu_policy, pmsg); - if (ret) { + if (ret) printk(KERN_ERR "cpufreq: suspend failed in ->suspend " "step on CPU %u\n", cpu_policy->cpu); - goto out; - } - } - - if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS) - goto out; - - if (cpufreq_driver->get) - cur_freq = cpufreq_driver->get(cpu_policy->cpu); - - if (!cur_freq || !cpu_policy->cur) { - printk(KERN_ERR "cpufreq: suspend failed to assert current " - "frequency 
is what timing core thinks it is.\n"); - goto out; - } - - if (unlikely(cur_freq != cpu_policy->cur)) { - struct cpufreq_freqs freqs; - - if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) - dprintk("Warning: CPU frequency is %u, " - "cpufreq assumed %u kHz.\n", - cur_freq, cpu_policy->cur); - - freqs.cpu = cpu; - freqs.old = cpu_policy->cur; - freqs.new = cur_freq; - - srcu_notifier_call_chain(&cpufreq_transition_notifier_list, - CPUFREQ_SUSPENDCHANGE, &freqs); - adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs); - - cpu_policy->cur = cur_freq; } out: @@ -1300,14 +1287,17 @@ out: * cpufreq_resume - restore proper CPU frequency handling after resume * * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) - * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync - * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are - * restored. + * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are + * restored. It will verify that the current freq is in sync with + * what we believe it to be. This is a bit later than when it + * should be, but nonethteless it's better than calling + * cpufreq_driver->get() here which might re-enable interrupts... */ static int cpufreq_resume(struct sys_device *sysdev) { - int cpu = sysdev->id; int ret = 0; + + int cpu = sysdev->id; struct cpufreq_policy *cpu_policy; dprintk("resuming cpu %u\n", cpu); @@ -1337,42 +1327,8 @@ static int cpufreq_resume(struct sys_device *sysdev) } } - if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { - unsigned int cur_freq = 0; - - if (cpufreq_driver->get) - cur_freq = cpufreq_driver->get(cpu_policy->cpu); - - if (!cur_freq || !cpu_policy->cur) { - printk(KERN_ERR "cpufreq: resume failed to assert " - "current frequency is what timing core " - "thinks it is.\n"); - goto out; - } - - if (unlikely(cur_freq != cpu_policy->cur)) { - struct cpufreq_freqs freqs; - - if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) - dprintk("Warning: CPU frequency " - "is %u, cpufreq assumed %u kHz.\n", - cur_freq, cpu_policy->cur); - - freqs.cpu = cpu; - freqs.old = cpu_policy->cur; - freqs.new = cur_freq; - - srcu_notifier_call_chain( - &cpufreq_transition_notifier_list, - CPUFREQ_RESUMECHANGE, &freqs); - adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); - - cpu_policy->cur = cur_freq; - } - } - -out: schedule_work(&cpu_policy->update); + fail: cpufreq_cpu_put(cpu_policy); return ret; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 7fc58af748b..bdea7e2f94b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -64,21 +64,20 @@ struct cpu_dbs_info_s { unsigned int requested_freq; int cpu; unsigned int enable:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock - * is recursive for the same process. -Venki - * DEADLOCK ALERT! 
(2) : do_dbs_timer() must not take the dbs_mutex, because it - * would deadlock with cancel_delayed_work_sync(), which is needed for proper - * raceless workqueue teardown. + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. */ static DEFINE_MUTEX(dbs_mutex); @@ -488,18 +487,12 @@ static void do_dbs_timer(struct work_struct *work) delay -= jiffies % delay; - if (lock_policy_rwsem_write(cpu) < 0) - return; - - if (!dbs_info->enable) { - unlock_policy_rwsem_write(cpu); - return; - } + mutex_lock(&dbs_info->timer_mutex); dbs_check_cpu(dbs_info); queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); - unlock_policy_rwsem_write(cpu); + mutex_unlock(&dbs_info->timer_mutex); } static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) @@ -535,9 +528,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if ((!cpu_online(cpu)) || (!policy->cur)) return -EINVAL; - if (this_dbs_info->enable) /* Already enabled */ - break; - mutex_lock(&dbs_mutex); rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); @@ -561,6 +551,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, this_dbs_info->down_skip = 0; this_dbs_info->requested_freq = policy->cur; + mutex_init(&this_dbs_info->timer_mutex); dbs_enable++; /* * Start the timerschedule work, when this governor @@ -590,17 +581,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, &dbs_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } - dbs_timer_init(this_dbs_info); - mutex_unlock(&dbs_mutex); + dbs_timer_init(this_dbs_info); + break; case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); sysfs_remove_group(&policy->kobj, &dbs_attr_group); dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); /* * Stop the timerschedule work, when this governor @@ -616,7 +609,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); + mutex_lock(&this_dbs_info->timer_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target( this_dbs_info->cur_policy, @@ -625,7 +618,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, __cpufreq_driver_target( this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); + mutex_unlock(&this_dbs_info->timer_mutex); break; } diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 1911d172935..d6ba14276bb 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -70,23 +70,21 @@ struct cpu_dbs_info_s { unsigned int freq_lo_jiffies; unsigned int freq_hi_jiffies; int cpu; - unsigned int enable:1, - sample_type:1; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. 
Note that cpu_hotplug lock - * is recursive for the same process. -Venki - * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it - * would deadlock with cancel_delayed_work_sync(), which is needed for proper - * raceless workqueue teardown. + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. */ static DEFINE_MUTEX(dbs_mutex); @@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, return freq_hi; } +static void ondemand_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + static void ondemand_powersave_bias_init(void) { int i; for_each_online_cpu(i) { - struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i); - dbs_info->freq_table = cpufreq_frequency_get_table(i); - dbs_info->freq_lo = 0; + ondemand_powersave_bias_init_cpu(i); } } @@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused, unsigned int input; int ret; ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; mutex_lock(&dbs_mutex); - if (ret != 1) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); mutex_unlock(&dbs_mutex); @@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused, int ret; ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || input < MIN_FREQUENCY_UP_THRESHOLD) { - mutex_unlock(&dbs_mutex); return -EINVAL; } + mutex_lock(&dbs_mutex); dbs_tuners_ins.up_threshold = input; mutex_unlock(&dbs_mutex); @@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) struct cpufreq_policy *policy; unsigned int j; - if (!this_dbs_info->enable) - return; - this_dbs_info->freq_lo = 0; policy = this_dbs_info->cur_policy; @@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work) int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); delay -= jiffies % delay; - - if (lock_policy_rwsem_write(cpu) < 0) - return; - - if (!dbs_info->enable) { - unlock_policy_rwsem_write(cpu); - return; - } + mutex_lock(&dbs_info->timer_mutex); /* Common NORMAL_SAMPLE setup */ dbs_info->sample_type = DBS_NORMAL_SAMPLE; @@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work) dbs_info->freq_lo, CPUFREQ_RELATION_H); } queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); - unlock_policy_rwsem_write(cpu); + mutex_unlock(&dbs_info->timer_mutex); } static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) @@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); delay -= jiffies % delay; - dbs_info->enable = 1; - ondemand_powersave_bias_init(); dbs_info->sample_type = DBS_NORMAL_SAMPLE; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, @@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { - dbs_info->enable = 0; cancel_delayed_work_sync(&dbs_info->work); } @@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if ((!cpu_online(cpu)) || (!policy->cur)) return -EINVAL; - if (this_dbs_info->enable) /* Already 
enabled */ - break; - mutex_lock(&dbs_mutex); - dbs_enable++; rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); if (rc) { - dbs_enable--; mutex_unlock(&dbs_mutex); return rc; } + dbs_enable++; for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); @@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, } } this_dbs_info->cpu = cpu; + ondemand_powersave_bias_init_cpu(cpu); + mutex_init(&this_dbs_info->timer_mutex); /* * Start the timerschedule work, when this governor * is used for first time @@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, max(min_sampling_rate, latency * LATENCY_MULTIPLIER); } - dbs_timer_init(this_dbs_info); - mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); break; case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); sysfs_remove_group(&policy->kobj, &dbs_attr_group); + mutex_destroy(&this_dbs_info->timer_mutex); dbs_enable--; mutex_unlock(&dbs_mutex); break; case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); + mutex_lock(&this_dbs_info->timer_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); + mutex_unlock(&this_dbs_info->timer_mutex); break; } return 0; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 070357aaedb..81e1020fb51 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -4,7 +4,7 @@ menuconfig DMADEVICES bool "DMA Engine support" - depends on !HIGHMEM64G && HAS_DMA + depends on HAS_DMA help DMA engines can do asynchronous data transfers without involving the host CPU. Currently, this framework can be @@ -46,6 +46,14 @@ config DW_DMAC Support the Synopsys DesignWare AHB DMA controller. This can be integrated in chips such as the Atmel AT32ap7000. +config AT_HDMAC + tristate "Atmel AHB DMA support" + depends on ARCH_AT91SAM9RL + select DMA_ENGINE + help + Support the Atmel AHB DMA controller. This can be integrated in + chips such as the Atmel AT91SAM9RL. + config FSL_DMA tristate "Freescale Elo and Elo Plus DMA support" depends on FSL_SOC @@ -108,7 +116,7 @@ config NET_DMA config ASYNC_TX_DMA bool "Async_tx: Offload support for the async_tx api" - depends on DMA_ENGINE + depends on DMA_ENGINE && !HIGHMEM64G help This allows the async_tx api to take advantage of offload engines for memcpy, memset, xor, and raid6 p+q operations. 
If your platform has diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a0b6564800c..40e1e008357 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o +obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c new file mode 100644 index 00000000000..9a1e5fb412e --- /dev/null +++ b/drivers/dma/at_hdmac.c @@ -0,0 +1,1213 @@ +/* + * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * This supports the Atmel AHB DMA Controller, + * + * The driver has currently been tested with the Atmel AT91SAM9RL + * and AT91SAM9G45 series. + */ + +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "at_hdmac_regs.h" + +/* + * Glossary + * -------- + * + * at_hdmac : Name of the ATmel AHB DMA Controller + * at_dma_ / atdma : ATmel DMA controller entity related + * atc_ / atchan : ATmel DMA Channel entity related + */ + +#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) +#define ATC_DEFAULT_CTRLA (0) +#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \ + |ATC_DIF(1)) + +/* + * Initial number of descriptors to allocate for each channel. This could + * be increased during dma usage. + */ +static unsigned int init_nr_desc_per_channel = 64; +module_param(init_nr_desc_per_channel, uint, 0644); +MODULE_PARM_DESC(init_nr_desc_per_channel, + "initial descriptors per channel (default: 64)"); + + +/* prototypes */ +static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); + + +/*----------------------------------------------------------------------*/ + +static struct at_desc *atc_first_active(struct at_dma_chan *atchan) +{ + return list_first_entry(&atchan->active_list, + struct at_desc, desc_node); +} + +static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) +{ + return list_first_entry(&atchan->queue, + struct at_desc, desc_node); +} + +/** + * atc_alloc_descriptor - allocate and return an initilized descriptor + * @chan: the channel to allocate descriptors for + * @gfp_flags: GFP allocation flags + * + * Note: The ack-bit is positioned in the descriptor flag at creation time + * to make initial allocation more convenient. This bit will be cleared + * and control will be given to client at usage time (during + * preparation functions). 
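+ * Recycling is done in atc_desc_get(), which only reuses descriptors
+ * whose ACK bit is set again (async_tx_test_ack()), i.e. descriptors
+ * the client has released via async_tx_ack() or DMA_CTRL_ACK.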
+ */ +static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, + gfp_t gfp_flags) +{ + struct at_desc *desc = NULL; + struct at_dma *atdma = to_at_dma(chan->device); + dma_addr_t phys; + + desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); + if (desc) { + memset(desc, 0, sizeof(struct at_desc)); + dma_async_tx_descriptor_init(&desc->txd, chan); + /* txd.flags will be overwritten in prep functions */ + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.tx_submit = atc_tx_submit; + desc->txd.phys = phys; + } + + return desc; +} + +/** + * atc_desc_get - get a unsused descriptor from free_list + * @atchan: channel we want a new descriptor for + */ +static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + struct at_desc *ret = NULL; + unsigned int i = 0; + LIST_HEAD(tmp_list); + + spin_lock_bh(&atchan->lock); + list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&atchan->chan_common), + "desc %p not ACKed\n", desc); + } + spin_unlock_bh(&atchan->lock); + dev_vdbg(chan2dev(&atchan->chan_common), + "scanned %u descriptors on freelist\n", i); + + /* no more descriptor available in initial pool: create one more */ + if (!ret) { + ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); + if (ret) { + spin_lock_bh(&atchan->lock); + atchan->descs_allocated++; + spin_unlock_bh(&atchan->lock); + } else { + dev_err(chan2dev(&atchan->chan_common), + "not enough descriptors available\n"); + } + } + + return ret; +} + +/** + * atc_desc_put - move a descriptor, including any children, to the free list + * @atchan: channel we work on + * @desc: descriptor, at the head of a chain, to move to free list + */ +static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) +{ + if (desc) { + struct at_desc *child; + + spin_lock_bh(&atchan->lock); + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dev_vdbg(chan2dev(&atchan->chan_common), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->txd.tx_list, &atchan->free_list); + dev_vdbg(chan2dev(&atchan->chan_common), + "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &atchan->free_list); + spin_unlock_bh(&atchan->lock); + } +} + +/** + * atc_assign_cookie - compute and assign new cookie + * @atchan: channel we work on + * @desc: descriptor to asign cookie for + * + * Called with atchan->lock held and bh disabled + */ +static dma_cookie_t +atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc) +{ + dma_cookie_t cookie = atchan->chan_common.cookie; + + if (++cookie < 0) + cookie = 1; + + atchan->chan_common.cookie = cookie; + desc->txd.cookie = cookie; + + return cookie; +} + +/** + * atc_dostart - starts the DMA engine for real + * @atchan: the channel we want to start + * @first: first descriptor in the list we want to begin with + * + * Called with atchan->lock held and bh disabled + */ +static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + /* ASSERT: channel is idle */ + if (atc_chan_is_enabled(atchan)) { + dev_err(chan2dev(&atchan->chan_common), + "BUG: Attempted to start non-idle channel\n"); + dev_err(chan2dev(&atchan->chan_common), + " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", + channel_readl(atchan, SADDR), + channel_readl(atchan, DADDR), + channel_readl(atchan, CTRLA), + 
channel_readl(atchan, CTRLB), + channel_readl(atchan, DSCR)); + + /* The tasklet will hopefully advance the queue... */ + return; + } + + vdbg_dump_regs(atchan); + + /* clear any pending interrupt */ + while (dma_readl(atdma, EBCISR)) + cpu_relax(); + + channel_writel(atchan, SADDR, 0); + channel_writel(atchan, DADDR, 0); + channel_writel(atchan, CTRLA, 0); + channel_writel(atchan, CTRLB, 0); + channel_writel(atchan, DSCR, first->txd.phys); + dma_writel(atdma, CHER, atchan->mask); + + vdbg_dump_regs(atchan); +} + +/** + * atc_chain_complete - finish work for one transaction chain + * @atchan: channel we work on + * @desc: descriptor at the head of the chain we want do complete + * + * Called with atchan->lock held and bh disabled */ +static void +atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) +{ + dma_async_tx_callback callback; + void *param; + struct dma_async_tx_descriptor *txd = &desc->txd; + + dev_vdbg(chan2dev(&atchan->chan_common), + "descriptor %u complete\n", txd->cookie); + + atchan->completed_cookie = txd->cookie; + callback = txd->callback; + param = txd->callback_param; + + /* move children to free_list */ + list_splice_init(&txd->tx_list, &atchan->free_list); + /* move myself to free_list */ + list_move(&desc->desc_node, &atchan->free_list); + + /* unmap dma addresses */ + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(chan2parent(&atchan->chan_common), + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + else + dma_unmap_page(chan2parent(&atchan->chan_common), + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(chan2parent(&atchan->chan_common), + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + else + dma_unmap_page(chan2parent(&atchan->chan_common), + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + } + + /* + * The API requires that no submissions are done from a + * callback, so we don't need to drop the lock here + */ + if (callback) + callback(param); + + dma_run_dependencies(txd); +} + +/** + * atc_complete_all - finish work for all transactions + * @atchan: channel to complete transactions for + * + * Eventually submit queued descriptors if any + * + * Assume channel is idle while calling this function + * Called with atchan->lock held and bh disabled + */ +static void atc_complete_all(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); + + BUG_ON(atc_chan_is_enabled(atchan)); + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
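+ * (The queued descriptors are handed to the hardware first, below;
+ * only then are the completed chains, and their client callbacks,
+ * processed.)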
+ */ + if (!list_empty(&atchan->queue)) + atc_dostart(atchan, atc_first_queued(atchan)); + /* empty active_list now it is completed */ + list_splice_init(&atchan->active_list, &list); + /* empty queue list by moving descriptors (if any) to active_list */ + list_splice_init(&atchan->queue, &atchan->active_list); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); +} + +/** + * atc_cleanup_descriptors - cleanup up finished descriptors in active_list + * @atchan: channel to be cleaned up + * + * Called with atchan->lock held and bh disabled + */ +static void atc_cleanup_descriptors(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + struct at_desc *child; + + dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); + + list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { + if (!(desc->lli.ctrla & ATC_DONE)) + /* This one is currently in progress */ + return; + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + if (!(child->lli.ctrla & ATC_DONE)) + /* Currently in progress */ + return; + + /* + * No descriptors so far seem to be in progress, i.e. + * this chain must be done. + */ + atc_chain_complete(atchan, desc); + } +} + +/** + * atc_advance_work - at the end of a transaction, move forward + * @atchan: channel where the transaction ended + * + * Called with atchan->lock held and bh disabled + */ +static void atc_advance_work(struct at_dma_chan *atchan) +{ + dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); + + if (list_empty(&atchan->active_list) || + list_is_singular(&atchan->active_list)) { + atc_complete_all(atchan); + } else { + atc_chain_complete(atchan, atc_first_active(atchan)); + /* advance work */ + atc_dostart(atchan, atc_first_active(atchan)); + } +} + + +/** + * atc_handle_error - handle errors reported by DMA controller + * @atchan: channel where error occurs + * + * Called with atchan->lock held and bh disabled + */ +static void atc_handle_error(struct at_dma_chan *atchan) +{ + struct at_desc *bad_desc; + struct at_desc *child; + + /* + * The descriptor currently at the head of the active list is + * broked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = atc_first_active(atchan); + list_del_init(&bad_desc->desc_node); + + /* As we are stopped, take advantage to push queued descriptors + * in active_list */ + list_splice_init(&atchan->queue, atchan->active_list.prev); + + /* Try to restart the controller */ + if (!list_empty(&atchan->active_list)) + atc_dostart(atchan, atc_first_active(atchan)); + + /* + * KERN_CRITICAL may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. 
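+ * After dumping the offending LLI chain, the descriptor is completed
+ * as if it had succeeded, so the client callback still runs and
+ * dma_run_dependencies() can kick off any dependent transactions.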
+ */ + dev_crit(chan2dev(&atchan->chan_common), + "Bad descriptor submitted for DMA!\n"); + dev_crit(chan2dev(&atchan->chan_common), + " cookie: %d\n", bad_desc->txd.cookie); + atc_dump_lli(atchan, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + atc_dump_lli(atchan, &child->lli); + + /* Pretend the descriptor completed successfully */ + atc_chain_complete(atchan, bad_desc); +} + + +/*-- IRQ & Tasklet ---------------------------------------------------*/ + +static void atc_tasklet(unsigned long data) +{ + struct at_dma_chan *atchan = (struct at_dma_chan *)data; + + /* Channel cannot be enabled here */ + if (atc_chan_is_enabled(atchan)) { + dev_err(chan2dev(&atchan->chan_common), + "BUG: channel enabled in tasklet\n"); + return; + } + + spin_lock(&atchan->lock); + if (test_and_clear_bit(0, &atchan->error_status)) + atc_handle_error(atchan); + else + atc_advance_work(atchan); + + spin_unlock(&atchan->lock); +} + +static irqreturn_t at_dma_interrupt(int irq, void *dev_id) +{ + struct at_dma *atdma = (struct at_dma *)dev_id; + struct at_dma_chan *atchan; + int i; + u32 status, pending, imr; + int ret = IRQ_NONE; + + do { + imr = dma_readl(atdma, EBCIMR); + status = dma_readl(atdma, EBCISR); + pending = status & imr; + + if (!pending) + break; + + dev_vdbg(atdma->dma_common.dev, + "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", + status, imr, pending); + + for (i = 0; i < atdma->dma_common.chancnt; i++) { + atchan = &atdma->chan[i]; + if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) { + if (pending & AT_DMA_ERR(i)) { + /* Disable channel on AHB error */ + dma_writel(atdma, CHDR, atchan->mask); + /* Give information to tasklet */ + set_bit(0, &atchan->error_status); + } + tasklet_schedule(&atchan->tasklet); + ret = IRQ_HANDLED; + } + } + + } while (pending); + + return ret; +} + + +/*-- DMA Engine API --------------------------------------------------*/ + +/** + * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine + * @desc: descriptor at the head of the transaction chain + * + * Queue chain if DMA engine is working already + * + * Cookie increment and adding to active_list or queue must be atomic + */ +static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct at_desc *desc = txd_to_at_desc(tx); + struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&atchan->lock); + cookie = atc_assign_cookie(atchan, desc); + + if (list_empty(&atchan->active_list)) { + dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", + desc->txd.cookie); + atc_dostart(atchan, desc); + list_add_tail(&desc->desc_node, &atchan->active_list); + } else { + dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", + desc->txd.cookie); + list_add_tail(&desc->desc_node, &atchan->queue); + } + + spin_unlock_bh(&atchan->lock); + + return cookie; +} + +/** + * atc_prep_dma_memcpy - prepare a memcpy operation + * @chan: the channel to prepare operation on + * @dest: operation virtual destination address + * @src: operation virtual source address + * @len: operation length + * @flags: tx descriptor status flags + */ +static struct dma_async_tx_descriptor * +atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc = NULL; + struct at_desc *first = NULL; + struct at_desc *prev = NULL; + size_t xfer_count; + size_t offset; + unsigned int src_width; + unsigned int dst_width; + u32 ctrla; 
+ u32 ctrlb; + + dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", + dest, src, len, flags); + + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + ctrla = ATC_DEFAULT_CTRLA; + ctrlb = ATC_DEFAULT_CTRLB + | ATC_SRC_ADDR_MODE_INCR + | ATC_DST_ADDR_MODE_INCR + | ATC_FC_MEM2MEM; + + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. + */ + if (!((src | dest | len) & 3)) { + ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; + src_width = dst_width = 2; + } else if (!((src | dest | len) & 1)) { + ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; + src_width = dst_width = 1; + } else { + ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; + src_width = dst_width = 0; + } + + for (offset = 0; offset < len; offset += xfer_count << src_width) { + xfer_count = min_t(size_t, (len - offset) >> src_width, + ATC_BTSIZE_MAX); + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + desc->lli.saddr = src + offset; + desc->lli.daddr = dest + offset; + desc->lli.ctrla = ctrla | xfer_count; + desc->lli.ctrlb = ctrlb; + + desc->txd.cookie = 0; + async_tx_ack(&desc->txd); + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + } + + /* First descriptor of the chain embedds additional information */ + first->txd.cookie = -EBUSY; + first->len = len; + + /* set end-of-link to the last link descriptor of list*/ + set_desc_eol(desc); + + desc->txd.flags = flags; /* client is in control of this ack */ + + return &first->txd; + +err_desc_get: + atc_desc_put(atchan, first); + return NULL; +} + + +/** + * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @chan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: tx descriptor status flags + */ +static struct dma_async_tx_descriptor * +atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_slave *atslave = chan->private; + struct at_desc *first = NULL; + struct at_desc *prev = NULL; + u32 ctrla; + u32 ctrlb; + dma_addr_t reg; + unsigned int reg_width; + unsigned int mem_width; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; + + dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", + direction == DMA_TO_DEVICE ? 
"TO DEVICE" : "FROM DEVICE", + flags); + + if (unlikely(!atslave || !sg_len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + reg_width = atslave->reg_width; + + sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); + + ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; + + switch (direction) { + case DMA_TO_DEVICE: + ctrla |= ATC_DST_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_FIXED + | ATC_SRC_ADDR_MODE_INCR + | ATC_FC_MEM2PER; + reg = atslave->tx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = mem; + desc->lli.daddr = reg; + desc->lli.ctrla = ctrla + | ATC_SRC_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + case DMA_FROM_DEVICE: + ctrla |= ATC_SRC_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_INCR + | ATC_SRC_ADDR_MODE_FIXED + | ATC_FC_PER2MEM; + + reg = atslave->rx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = reg; + desc->lli.daddr = mem; + desc->lli.ctrla = ctrla + | ATC_DST_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + default: + return NULL; + } + + /* set end-of-link to the last link descriptor of list*/ + set_desc_eol(prev); + + /* First descriptor of the chain embedds additional information */ + first->txd.cookie = -EBUSY; + first->len = total_len; + + /* last link descriptor of list is responsible of flags */ + prev->txd.flags = flags; /* client is in control of this ack */ + + return &first->txd; + +err_desc_get: + dev_err(chan2dev(chan), "not enough descriptors available\n"); + atc_desc_put(atchan, first); + return NULL; +} + +static void atc_terminate_all(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. 
+ */ + spin_lock_bh(&atchan->lock); + + dma_writel(atdma, CHDR, atchan->mask); + + /* confirm that this channel is disabled */ + while (dma_readl(atdma, CHSR) & atchan->mask) + cpu_relax(); + + /* active_list entries will end up before queued entries */ + list_splice_init(&atchan->queue, &list); + list_splice_init(&atchan->active_list, &list); + + spin_unlock_bh(&atchan->lock); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); +} + +/** + * atc_is_tx_complete - poll for transaction completion + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @done: if not %NULL, updated with last completed transaction + * @used: if not %NULL, updated with last used transaction + * + * If @done and @used are passed in, upon return they reflect the driver + * internal state and can be used with dma_async_is_complete() to check + * the status of multiple cookies without re-checking hardware state. + */ +static enum dma_status +atc_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + enum dma_status ret; + + dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", + cookie, done ? *done : 0, used ? *used : 0); + + spin_lock_bh(atchan->lock); + + last_complete = atchan->completed_cookie; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret != DMA_SUCCESS) { + atc_cleanup_descriptors(atchan); + + last_complete = atchan->completed_cookie; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + } + + spin_unlock_bh(atchan->lock); + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return ret; +} + +/** + * atc_issue_pending - try to finish work + * @chan: target DMA channel + */ +static void atc_issue_pending(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + dev_vdbg(chan2dev(chan), "issue_pending\n"); + + if (!atc_chan_is_enabled(atchan)) { + spin_lock_bh(&atchan->lock); + atc_advance_work(atchan); + spin_unlock_bh(&atchan->lock); + } +} + +/** + * atc_alloc_chan_resources - allocate resources for DMA channel + * @chan: allocate descriptor resources for this channel + * @client: current client requesting the channel be ready for requests + * + * return - the number of allocated descriptors + */ +static int atc_alloc_chan_resources(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc; + struct at_dma_slave *atslave; + int i; + u32 cfg; + LIST_HEAD(tmp_list); + + dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); + + /* ASSERT: channel is idle */ + if (atc_chan_is_enabled(atchan)) { + dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); + return -EIO; + } + + cfg = ATC_DEFAULT_CFG; + + atslave = chan->private; + if (atslave) { + /* + * We need controller-specific data to set up slave + * transfers. + */ + BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); + + /* if cfg configuration specified take it instad of default */ + if (atslave->cfg) + cfg = atslave->cfg; + } + + /* have we already been set up? 
+ * reconfigure channel but no need to reallocate descriptors */ + if (!list_empty(&atchan->free_list)) + return atchan->descs_allocated; + + /* Allocate initial pool of descriptors */ + for (i = 0; i < init_nr_desc_per_channel; i++) { + desc = atc_alloc_descriptor(chan, GFP_KERNEL); + if (!desc) { + dev_err(atdma->dma_common.dev, + "Only %d initial descriptors\n", i); + break; + } + list_add_tail(&desc->desc_node, &tmp_list); + } + + spin_lock_bh(&atchan->lock); + atchan->descs_allocated = i; + list_splice(&tmp_list, &atchan->free_list); + atchan->completed_cookie = chan->cookie = 1; + spin_unlock_bh(&atchan->lock); + + /* channel parameters */ + channel_writel(atchan, CFG, cfg); + + dev_dbg(chan2dev(chan), + "alloc_chan_resources: allocated %d descriptors\n", + atchan->descs_allocated); + + return atchan->descs_allocated; +} + +/** + * atc_free_chan_resources - free all channel resources + * @chan: DMA channel + */ +static void atc_free_chan_resources(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", + atchan->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&atchan->active_list)); + BUG_ON(!list_empty(&atchan->queue)); + BUG_ON(atc_chan_is_enabled(atchan)); + + list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { + dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); + list_del(&desc->desc_node); + /* free link descriptor */ + dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); + } + list_splice_init(&atchan->free_list, &list); + atchan->descs_allocated = 0; + + dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); +} + + +/*-- Module Management -----------------------------------------------*/ + +/** + * at_dma_off - disable DMA controller + * @atdma: the Atmel HDAMC device + */ +static void at_dma_off(struct at_dma *atdma) +{ + dma_writel(atdma, EN, 0); + + /* disable all interrupts */ + dma_writel(atdma, EBCIDR, -1L); + + /* confirm that all channels are disabled */ + while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) + cpu_relax(); +} + +static int __init at_dma_probe(struct platform_device *pdev) +{ + struct at_dma_platform_data *pdata; + struct resource *io; + struct at_dma *atdma; + size_t size; + int irq; + int err; + int i; + + /* get DMA Controller parameters from platform */ + pdata = pdev->dev.platform_data; + if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) + return -EINVAL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + size = sizeof(struct at_dma); + size += pdata->nr_channels * sizeof(struct at_dma_chan); + atdma = kzalloc(size, GFP_KERNEL); + if (!atdma) + return -ENOMEM; + + /* discover transaction capabilites from the platform data */ + atdma->dma_common.cap_mask = pdata->cap_mask; + atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; + + size = io->end - io->start + 1; + if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { + err = -EBUSY; + goto err_kfree; + } + + atdma->regs = ioremap(io->start, size); + if (!atdma->regs) { + err = -ENOMEM; + goto err_release_r; + } + + atdma->clk = clk_get(&pdev->dev, "dma_clk"); + if (IS_ERR(atdma->clk)) { + err = PTR_ERR(atdma->clk); + goto err_clk; + } + clk_enable(atdma->clk); + + /* force dma off, just in case */ + 
at_dma_off(atdma); + + err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); + if (err) + goto err_irq; + + platform_set_drvdata(pdev, atdma); + + /* create a pool of consistent memory blocks for hardware descriptors */ + atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", + &pdev->dev, sizeof(struct at_desc), + 4 /* word alignment */, 0); + if (!atdma->dma_desc_pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + err = -ENOMEM; + goto err_pool_create; + } + + /* clear any pending interrupt */ + while (dma_readl(atdma, EBCISR)) + cpu_relax(); + + /* initialize channels related values */ + INIT_LIST_HEAD(&atdma->dma_common.channels); + for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { + struct at_dma_chan *atchan = &atdma->chan[i]; + + atchan->chan_common.device = &atdma->dma_common; + atchan->chan_common.cookie = atchan->completed_cookie = 1; + atchan->chan_common.chan_id = i; + list_add_tail(&atchan->chan_common.device_node, + &atdma->dma_common.channels); + + atchan->ch_regs = atdma->regs + ch_regs(i); + spin_lock_init(&atchan->lock); + atchan->mask = 1 << i; + + INIT_LIST_HEAD(&atchan->active_list); + INIT_LIST_HEAD(&atchan->queue); + INIT_LIST_HEAD(&atchan->free_list); + + tasklet_init(&atchan->tasklet, atc_tasklet, + (unsigned long)atchan); + atc_enable_irq(atchan); + } + + /* set base routines */ + atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; + atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; + atdma->dma_common.device_is_tx_complete = atc_is_tx_complete; + atdma->dma_common.device_issue_pending = atc_issue_pending; + atdma->dma_common.dev = &pdev->dev; + + /* set prep routines based on capability */ + if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) + atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; + + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { + atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; + atdma->dma_common.device_terminate_all = atc_terminate_all; + } + + dma_writel(atdma, EN, AT_DMA_ENABLE); + + dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", + dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", + dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", + atdma->dma_common.chancnt); + + dma_async_device_register(&atdma->dma_common); + + return 0; + +err_pool_create: + platform_set_drvdata(pdev, NULL); + free_irq(platform_get_irq(pdev, 0), atdma); +err_irq: + clk_disable(atdma->clk); + clk_put(atdma->clk); +err_clk: + iounmap(atdma->regs); + atdma->regs = NULL; +err_release_r: + release_mem_region(io->start, size); +err_kfree: + kfree(atdma); + return err; +} + +static int __exit at_dma_remove(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; + struct resource *io; + + at_dma_off(atdma); + dma_async_device_unregister(&atdma->dma_common); + + dma_pool_destroy(atdma->dma_desc_pool); + platform_set_drvdata(pdev, NULL); + free_irq(platform_get_irq(pdev, 0), atdma); + + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + device_node) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + /* Disable interrupts */ + atc_disable_irq(atchan); + tasklet_disable(&atchan->tasklet); + + tasklet_kill(&atchan->tasklet); + list_del(&chan->device_node); + } + + clk_disable(atdma->clk); + clk_put(atdma->clk); + + iounmap(atdma->regs); + atdma->regs = NULL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(io->start, io->end - io->start + 1); + + kfree(atdma); + + return 0; +} + +static void at_dma_shutdown(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + at_dma_off(platform_get_drvdata(pdev)); + clk_disable(atdma->clk); +} + +static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + at_dma_off(platform_get_drvdata(pdev)); + clk_disable(atdma->clk); + return 0; +} + +static int at_dma_resume_early(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + clk_enable(atdma->clk); + dma_writel(atdma, EN, AT_DMA_ENABLE); + return 0; + +} + +static struct platform_driver at_dma_driver = { + .remove = __exit_p(at_dma_remove), + .shutdown = at_dma_shutdown, + .suspend_late = at_dma_suspend_late, + .resume_early = at_dma_resume_early, + .driver = { + .name = "at_hdmac", + }, +}; + +static int __init at_dma_init(void) +{ + return platform_driver_probe(&at_dma_driver, at_dma_probe); +} +module_init(at_dma_init); + +static void __exit at_dma_exit(void) +{ + platform_driver_unregister(&at_dma_driver); +} +module_exit(at_dma_exit); + +MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); +MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:at_hdmac"); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h new file mode 100644 index 00000000000..4c972afc49e --- /dev/null +++ b/drivers/dma/at_hdmac_regs.h @@ -0,0 +1,353 @@ +/* + * Header file for the Atmel AHB DMA Controller driver + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef AT_HDMAC_REGS_H +#define AT_HDMAC_REGS_H + +#include <mach/at_hdmac.h> + +#define AT_DMA_MAX_NR_CHANNELS 8 + + +#define AT_DMA_GCFG 0x00 /* Global Configuration Register */ +#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */ +#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. 
*/ +#define AT_DMA_ARB_CFG_FIXED (0x0 << 4) +#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4) + +#define AT_DMA_EN 0x04 /* Controller Enable Register */ +#define AT_DMA_ENABLE (0x1 << 0) + +#define AT_DMA_SREQ 0x08 /* Software Single Request Register */ +#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */ +#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */ + +#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */ +#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */ +#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */ + +#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */ +#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */ +#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */ + +#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */ +#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */ + +/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ +#define AT_DMA_EBCIER 0x18 /* Enable register */ +#define AT_DMA_EBCIDR 0x1C /* Disable register */ +#define AT_DMA_EBCIMR 0x20 /* Mask Register */ +#define AT_DMA_EBCISR 0x24 /* Status Register */ +#define AT_DMA_CBTC_OFFSET 8 +#define AT_DMA_ERR_OFFSET 16 +#define AT_DMA_BTC(x) (0x1 << (x)) +#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x))) +#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x))) + +#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */ +#define AT_DMA_ENA(x) (0x1 << (x)) +#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x))) +#define AT_DMA_KEEP(x) (0x1 << (24 + (x))) + +#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */ +#define AT_DMA_DIS(x) (0x1 << (x)) +#define AT_DMA_RES(x) (0x1 << ( 8 + (x))) + +#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */ +#define AT_DMA_EMPT(x) (0x1 << (16 + (x))) +#define AT_DMA_STAL(x) (0x1 << (24 + (x))) + + +#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */ +#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ + +/* Hardware register offset for each channel */ +#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ +#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ +#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ +#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */ +#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ +#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ +#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ +#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */ + + +/* Bitfield definitions */ + +/* Bitfields in DSCR */ +#define ATC_DSCR_IF(i) (0x3 & (i)) /* Dsc fetched via AHB-Lite Interface i */ + +/* Bitfields in CTRLA */ +#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ +#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ +/* Chunk Transfer size definitions are in at_hdmac.h */ +#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ +#define ATC_SRC_WIDTH(x) ((x) << 24) +#define ATC_SRC_WIDTH_BYTE (0x0 << 24) +#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) +#define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ +#define ATC_DST_WIDTH(x) 
((x) << 28) +#define ATC_DST_WIDTH_BYTE (0x0 << 28) +#define ATC_DST_WIDTH_HALFWORD (0x1 << 28) +#define ATC_DST_WIDTH_WORD (0x2 << 28) +#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */ + +/* Bitfields in CTRLB */ +#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ +#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ +#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ +#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ +#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ +#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */ +#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */ +#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */ +#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */ +#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */ +#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */ +#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */ +#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */ +#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */ +#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */ +#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24) +#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */ +#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */ +#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */ +#define ATC_DST_ADDR_MODE_MASK (0x3 << 28) +#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */ +#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */ +#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */ +#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */ +#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */ + +/* Bitfields in CFG */ +/* are in at_hdmac.h */ + +/* Bitfields in SPIP */ +#define ATC_SPIP_HOLE(x) (0xFFFFU & (x)) +#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) + +/* Bitfields in DPIP */ +#define ATC_DPIP_HOLE(x) (0xFFFFU & (x)) +#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) + + +/*-- descriptors -----------------------------------------------------*/ + +/* LLI == Linked List Item; aka DMA buffer descriptor */ +struct at_lli { + /* values that are not changed by hardware */ + dma_addr_t saddr; + dma_addr_t daddr; + /* value that may get written back: */ + u32 ctrla; + /* more values that are not changed by hardware */ + u32 ctrlb; + dma_addr_t dscr; /* chain to next lli */ +}; + +/** + * struct at_desc - software descriptor + * @lli: hardware lli structure + * @txd: support for the async_tx api + * @desc_node: node on the channel descriptors list + * @len: total transaction bytecount + */ +struct at_desc { + /* FIRST values the hardware uses */ + struct at_lli lli; + + /* THEN values for driver housekeeping */ + struct dma_async_tx_descriptor txd; + struct list_head desc_node; + size_t len; +}; + +static inline struct at_desc * +txd_to_at_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct at_desc, txd); +} + + +/*-- Channels --------------------------------------------------------*/ + +/** + * struct at_dma_chan - internal representation of an Atmel HDMAC channel + * @chan_common: common dmaengine channel object members + * @device: parent device + * @ch_regs: memory mapped register base + * @mask: channel index in a mask + * @error_status: 
transmit error status information from irq handler + * to tasklet (use atomic operations) + * @tasklet: bottom half to finish transaction work + * @lock: serializes enqueue/dequeue operations to descriptor lists + * @completed_cookie: identifier for the most recently completed operation + * @active_list: list of descriptors dmaengine is running on + * @queue: list of descriptors ready to be submitted to engine + * @free_list: list of descriptors usable by the channel + * @descs_allocated: records the actual size of the descriptor pool + */ +struct at_dma_chan { + struct dma_chan chan_common; + struct at_dma *device; + void __iomem *ch_regs; + u8 mask; + unsigned long error_status; + struct tasklet_struct tasklet; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + dma_cookie_t completed_cookie; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + unsigned int descs_allocated; +}; + +#define channel_readl(atchan, name) \ + __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) + +#define channel_writel(atchan, name, val) \ + __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) + +static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) +{ + return container_of(dchan, struct at_dma_chan, chan_common); +} + + +/*-- Controller ------------------------------------------------------*/ + +/** + * struct at_dma - internal representation of an Atmel HDMA Controller + * @dma_common: common dmaengine dma_device object members + * @regs: memory mapped register base + * @clk: dma controller clock + * @all_chan_mask: all channels available in a mask + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @chan: channels table to store at_dma_chan structures + */ +struct at_dma { + struct dma_device dma_common; + void __iomem *regs; + struct clk *clk; + + u8 all_chan_mask; + + struct dma_pool *dma_desc_pool; + /* AT THE END channels table */ + struct at_dma_chan chan[0]; +}; + +#define dma_readl(atdma, name) \ + __raw_readl((atdma)->regs + AT_DMA_##name) +#define dma_writel(atdma, name, val) \ + __raw_writel((val), (atdma)->regs + AT_DMA_##name) + +static inline struct at_dma *to_at_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct at_dma, dma_common); +} + + +/*-- Helper functions ------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +#if defined(VERBOSE_DEBUG) +static void vdbg_dump_regs(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + dev_err(chan2dev(&atchan->chan_common), + " channel %d : imr = 0x%x, chsr = 0x%x\n", + atchan->chan_common.chan_id, + dma_readl(atdma, EBCIMR), + dma_readl(atdma, CHSR)); + + dev_err(chan2dev(&atchan->chan_common), + " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n", + channel_readl(atchan, SADDR), + channel_readl(atchan, DADDR), + channel_readl(atchan, CTRLA), + channel_readl(atchan, CTRLB), + channel_readl(atchan, CFG), + channel_readl(atchan, DSCR)); +} +#else +static void vdbg_dump_regs(struct at_dma_chan *atchan) {} +#endif + +static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) +{ + dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common), + " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", + lli->saddr, lli->daddr, + lli->ctrla, lli->ctrlb, lli->dscr); +} + + +static void 
atc_setup_irq(struct at_dma_chan *atchan, int on) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + u32 ebci; + + /* enable interrupts on buffer chain completion & error */ + ebci = AT_DMA_CBTC(atchan->chan_common.chan_id) + | AT_DMA_ERR(atchan->chan_common.chan_id); + if (on) + dma_writel(atdma, EBCIER, ebci); + else + dma_writel(atdma, EBCIDR, ebci); +} + +static inline void atc_enable_irq(struct at_dma_chan *atchan) +{ + atc_setup_irq(atchan, 1); +} + +static inline void atc_disable_irq(struct at_dma_chan *atchan) +{ + atc_setup_irq(atchan, 0); +} + + +/** + * atc_chan_is_enabled - test if given channel is enabled + * @atchan: channel whose status we want to test + */ +static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + return !!(dma_readl(atdma, CHSR) & atchan->mask); +} + + +/** + * set_desc_eol - set end-of-link to descriptor so it will end transfer + * @desc: descriptor, single or at the end of a chain, to end chain on + */ +static void set_desc_eol(struct at_desc *desc) +{ + desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; + desc->lli.dscr = 0; +} + +#endif /* AT_HDMAC_REGS_H */ diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index fb7da5141e9..d93017fc787 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); +static unsigned int iterations; +module_param(iterations, uint, S_IRUGO); +MODULE_PARM_DESC(iterations, + "Iterations before stopping test (default: infinite)"); + static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO); MODULE_PARM_DESC(xor_sources, @@ -114,7 +119,7 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK);; + | (~i & PATTERN_COUNT_MASK); for ( ; i < test_buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; @@ -270,7 +275,8 @@ static int dmatest_func(void *data) flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; - while (!kthread_should_stop()) { + while (!kthread_should_stop() + && !(iterations && total_tests >= iterations)) { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; @@ -416,6 +422,13 @@ err_srcbuf: err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); + + if (iterations > 0) + while (!kthread_should_stop()) { + DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); + interruptible_sleep_on(&wait_dmatest_exit); + } + return ret; } @@ -495,11 +508,11 @@ static int dmatest_add_channel(struct dma_chan *chan) if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_MEMCPY); - thread_count += cnt > 0 ?: 0; + thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_XOR); - thread_count += cnt > 0 ?: 0; + thread_count += cnt > 0 ? cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index f18d1bde043..ef87a898414 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -12,6 +12,11 @@ * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. 
* The support for MPC8349 DMA contorller is also added. * + * This driver instructs the DMA controller to issue the PCI Read Multiple + * command for PCI read operations, instead of using the default PCI Read Line + * command. Please be aware that this setting may result in read pre-fetching + * on some platforms. + * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -49,9 +54,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan) case FSL_DMA_IP_83XX: /* Set the channel to below modes: * EOTIE - End-of-transfer interrupt enable + * PRC_RM - PCI read multiple */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, - 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE + | FSL_DMA_MR_PRC_RM, 32); break; } @@ -136,15 +142,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan) static void dma_start(struct fsl_dma_chan *fsl_chan) { - u32 mr_set = 0;; + u32 mr_set = 0; if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); mr_set |= FSL_DMA_MR_EMP_EN; - } else + } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~FSL_DMA_MR_EMP_EN, 32); + } if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) mr_set |= FSL_DMA_MR_EMS_EN; @@ -871,9 +878,9 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: + new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 4f21a512d84..dc7f2686579 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -38,6 +38,7 @@ /* Special MR definition for MPC8349 */ #define FSL_DMA_MR_EOTIE 0x00000080 +#define FSL_DMA_MR_PRC_RM 0x00000800 #define FSL_DMA_SR_CH 0x00000020 #define FSL_DMA_SR_PE 0x00000010 diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index ddab94f5122..3f23eabe09f 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1176,7 +1176,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - dma_dev->max_xor = 8; ; + dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index c36bf40568c..e2a10bcba7a 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt) static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) { int bit; - enum dev_type edac_cap = EDAC_NONE; + enum dev_type edac_cap = EDAC_FLAG_NONE; bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F) ? 
19 : 17; - if (pvt->dclr0 >> BIT(bit)) + if (pvt->dclr0 & BIT(bit)) edac_cap = EDAC_FLAG_SECDED; return edac_cap; @@ -868,6 +868,8 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt) goto err_reg; } + return; + err_reg: debugf0("Error reading F2x%03x.\n", reg); } @@ -970,7 +972,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) } for (cs = 0; cs < pvt->num_dcsm; cs++) { - reg = K8_DCSB0 + (cs * 4); + reg = K8_DCSM0 + (cs * 4); err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]); if (unlikely(err)) @@ -1269,7 +1271,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) if (channels == 0) channels = 1; - debugf0("DIMM count= %d\n", channels); + debugf0("MCT channel count: %d\n", channels); return channels; @@ -2634,6 +2636,8 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) amd64_dump_misc_regs(pvt); + return; + err_reg: debugf0("Reading an MC register failed\n"); @@ -2966,12 +2970,20 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) " Use of the override can cause " "unknown side effects.\n"); ret = -ENODEV; - } + } else + /* + * enable further driver loading if ECC enable is + * overridden. + */ + ret = 0; } else { amd64_printk(KERN_INFO, "ECC is enabled by BIOS, Proceeding " "with EDAC module initialization\n"); + /* Signal good ECC status */ + ret = 0; + /* CLEAR the override, since BIOS controlled it */ ecc_enable_override = 0; } @@ -3006,7 +3018,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE; - mci->edac_cap = EDAC_FLAG_NONE; if (pvt->nbcap & K8_NBCAP_SECDED) mci->edac_ctl_cap |= EDAC_FLAG_SECDED; @@ -3052,7 +3063,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, if (!pvt) goto err_exit; - pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl); + pvt->mc_node_id = get_node_id(dram_f2_ctl); pvt->dram_f2_ctl = dram_f2_ctl; pvt->ext_model = boot_cpu_data.x86_model >> 4; @@ -3179,8 +3190,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev, { int ret = 0; - debugf0("(MC node=%d,mc_type='%s')\n", - get_mc_node_id_from_pdev(pdev), + debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), get_amd_family_name(mc_type->driver_data)); ret = pci_enable_device(pdev); @@ -3319,15 +3329,17 @@ static int __init amd64_edac_init(void) err = amd64_init_2nd_stage(pvt_lookup[nb]); if (err) - goto err_exit; + goto err_2nd_stage; } amd64_setup_pci_device(); return 0; +err_2nd_stage: + debugf0("2nd stage failed\n"); + err_exit: - debugf0("'finish_setup' stage failed\n"); pci_unregister_driver(&amd64_pci_driver); return err; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index a159957e167..ba73015af8e 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -444,7 +444,7 @@ enum { #define K8_MSR_MC4ADDR 0x0412 /* AMD sets the first MC device at device ID 0x18. 
*/ -static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev) +static inline int get_node_id(struct pci_dev *pdev) { return PCI_SLOT(pdev->devfn) - 0x18; } diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 3493c6bdb82..871c13b4c14 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -150,6 +150,8 @@ enum mem_type { MEM_FB_DDR2, /* fully buffered DDR2 */ MEM_RDDR2, /* Registered DDR2 RAM */ MEM_XDR, /* Rambus XDR */ + MEM_DDR3, /* DDR3 RAM */ + MEM_RDDR3, /* Registered DDR3 RAM */ }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -167,6 +169,8 @@ enum mem_type { #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) #define MEM_FLAG_XDR BIT(MEM_XDR) +#define MEM_FLAG_DDR3 BIT(MEM_DDR3) +#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) /* chipset Error Detection and Correction capabilities and mode */ enum edac_type { diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index ad218fe4942..e1d4ce08348 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -94,7 +94,9 @@ static const char *mem_types[] = { [MEM_DDR2] = "Unbuffered-DDR2", [MEM_FB_DDR2] = "FullyBuffered-DDR2", [MEM_RDDR2] = "Registered-DDR2", - [MEM_XDR] = "XDR" + [MEM_XDR] = "XDR", + [MEM_DDR3] = "Unbuffered-DDR3", + [MEM_RDDR3] = "Registered-DDR3" }; static const char *dev_types[] = { diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 7c8c2d72916..3f2ccfc6407 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) case DSC_SDTYPE_DDR2: mtype = MEM_RDDR2; break; + case DSC_SDTYPE_DDR3: + mtype = MEM_RDDR3; + break; default: mtype = MEM_UNKNOWN; break; @@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) case DSC_SDTYPE_DDR2: mtype = MEM_DDR2; break; + case DSC_SDTYPE_DDR3: + mtype = MEM_DDR3; + break; default: mtype = MEM_UNKNOWN; break; diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index 135b3539a03..52432ee7c4b 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h @@ -53,6 +53,7 @@ #define DSC_SDTYPE_DDR 0x02000000 #define DSC_SDTYPE_DDR2 0x03000000 +#define DSC_SDTYPE_DDR3 0x07000000 #define DSC_X32_EN 0x00000020 /* Err_Int_En */ diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index 2406c2ce284..d4ec6059317 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c @@ -30,7 +30,7 @@ /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */ #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ -#define X38_MCHBAR_HIGH 0x4b +#define X38_MCHBAR_HIGH 0x4c #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ #define X38_MMR_WINDOW_SIZE 16384 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 543fccac81b..f74edae5cb4 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation) { int channel, bandwidth = 0; - fw_iso_resource_manage(card, generation, 1ULL << 31, - &channel, &bandwidth, true); + fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, + &bandwidth, true, card->bm_transaction_data); if (channel == 31) { card->broadcast_channel_allocated = true; device_for_each_child(card->device, (void *)(long)generation, @@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work) bool do_reset = false; bool 
root_device_is_running; bool root_device_is_cmc; - __be32 lock_data[2]; spin_lock_irqsave(&card->lock, flags); @@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work) goto pick_me; } - lock_data[0] = cpu_to_be32(0x3f); - lock_data[1] = cpu_to_be32(local_id); + card->bm_transaction_data[0] = cpu_to_be32(0x3f); + card->bm_transaction_data[1] = cpu_to_be32(local_id); spin_unlock_irqrestore(&card->lock, flags); rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, - lock_data, sizeof(lock_data)); + card->bm_transaction_data, + sizeof(card->bm_transaction_data)); if (rcode == RCODE_GENERATION) /* Another bus reset, BM work has been rescheduled. */ goto out; if (rcode == RCODE_COMPLETE && - lock_data[0] != cpu_to_be32(0x3f)) { + card->bm_transaction_data[0] != cpu_to_be32(0x3f)) { /* Somebody else is BM. Only act as IRM. */ if (local_id == irm_id) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index d1d30c615b0..ced186d7e9a 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -125,6 +125,7 @@ struct iso_resource { int generation; u64 channels; s32 bandwidth; + __be32 transaction_data[2]; struct iso_resource_event *e_alloc, *e_dealloc; }; @@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work) r->channels, &channel, &bandwidth, todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC || - todo == ISO_RES_ALLOC_ONCE); + todo == ISO_RES_ALLOC_ONCE, + r->transaction_data); /* * Is this generation outdated already? As long as this resource sticks * in the idr, it will be scheduled again for a newer generation or at diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 166f19c6d38..1c0b504a42f 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop); */ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, - int bandwidth, bool allocate) + int bandwidth, bool allocate, __be32 data[2]) { - __be32 data[2]; int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; /* @@ -197,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, - data, sizeof(data))) { + data, 8)) { case RCODE_GENERATION: /* A generation change frees all bandwidth. */ return allocate ? -EAGAIN : bandwidth; @@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, } static int manage_channel(struct fw_card *card, int irm_id, int generation, - u32 channels_mask, u64 offset, bool allocate) + u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) { - __be32 data[2], c, all, old; + __be32 c, all, old; int i, retry = 5; old = all = allocate ? cpu_to_be32(~0) : 0; @@ -234,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, data[1] = old ^ c; switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, - offset, data, sizeof(data))) { + offset, data, 8)) { case RCODE_GENERATION: /* A generation change frees all channels. */ return allocate ? 
-EAGAIN : i; @@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, } static void deallocate_channel(struct fw_card *card, int irm_id, - int generation, int channel) + int generation, int channel, __be32 buffer[2]) { u32 mask; u64 offset; @@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; - manage_channel(card, irm_id, generation, mask, offset, false); + manage_channel(card, irm_id, generation, mask, offset, false, buffer); } /** @@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, */ void fw_iso_resource_manage(struct fw_card *card, int generation, u64 channels_mask, int *channel, int *bandwidth, - bool allocate) + bool allocate, __be32 buffer[2]) { u32 channels_hi = channels_mask; /* channels 31...0 */ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ @@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, if (channels_hi) c = manage_channel(card, irm_id, generation, channels_hi, - CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, + allocate, buffer); if (channels_lo && c < 0) { c = manage_channel(card, irm_id, generation, channels_lo, - CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, + allocate, buffer); if (c >= 0) c += 32; } @@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, if (*bandwidth == 0) return; - ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); + ret = manage_bandwidth(card, irm_id, generation, *bandwidth, + allocate, buffer); if (ret < 0) *bandwidth = 0; if (allocate && ret < 0 && c >= 0) { - deallocate_channel(card, irm_id, generation, c); + deallocate_channel(card, irm_id, generation, c, buffer); *channel = ret; } } diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index c3cfc647e5e..6052816be35 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event); int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); void fw_iso_resource_manage(struct fw_card *card, int generation, - u64 channels_mask, int *channel, int *bandwidth, bool allocate); + u64 channels_mask, int *channel, int *bandwidth, + bool allocate, __be32 buffer[2]); /* -topology */ diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index ecddd11b797..76b321bb73f 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -34,6 +34,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> +#include <linux/pci_ids.h> #include <linux/spinlock.h> #include <linux/string.h> @@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev) #define ohci_pmac_off(dev) #endif /* CONFIG_PPC_PMAC */ +#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT +#define PCI_DEVICE_ID_AGERE_FW643 0x5901 + static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { @@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev, version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; + /* dual-buffer mode is broken if more than one IR context is active */ + if (dev->vendor == PCI_VENDOR_ID_AGERE && + dev->device == 
PCI_DEVICE_ID_AGERE_FW643) + ohci->use_dualbuffer = false; + + /* dual-buffer mode is broken */ + if (dev->vendor == PCI_VENDOR_ID_RICOH && + dev->device == PCI_DEVICE_ID_RICOH_R5C832) + ohci->use_dualbuffer = false; + /* x86-32 currently doesn't use highmem for dma_alloc_coherent */ #if !defined(CONFIG_X86_32) /* dual-buffer mode is broken with descriptor addresses above 2G */ diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 24c45635376..e5df822a813 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -201,6 +201,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt) #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ /* + * There is no transport protocol limit to the CDB length, but we implement + * a fixed length only. 16 bytes is enough for disks larger than 2 TB. + */ +#define SBP2_MAX_CDB_SIZE 16 + +/* * The default maximum s/g segment size of a FireWire controller is * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to * be quadlet-aligned, we set the length limit to 0xffff & ~3. @@ -312,7 +318,7 @@ struct sbp2_command_orb { struct sbp2_pointer next; struct sbp2_pointer data_descriptor; __be32 misc; - u8 command_block[12]; + u8 command_block[SBP2_MAX_CDB_SIZE]; } request; struct scsi_cmnd *cmd; scsi_done_fn_t done; @@ -450,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, } spin_unlock_irqrestore(&card->lock, flags); - if (&orb->link != &lu->orb_list) + if (&orb->link != &lu->orb_list) { orb->callback(orb, &status); - else + kref_put(&orb->kref, free_orb); + } else { fw_error("status write for unknown orb\n"); - - kref_put(&orb->kref, free_orb); + } fw_send_response(card, request, RCODE_COMPLETE); } @@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev) if (fw_device_enable_phys_dma(device) < 0) goto fail_shost_put; + shost->max_cmd_len = SBP2_MAX_CDB_SIZE; + if (scsi_add_host(shost, &unit->device) < 0) goto fail_shost_put; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3582c39f972..96dda81c922 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -79,6 +79,12 @@ config GPIO_XILINX help Say yes here to support the Xilinx FPGA GPIO device +config GPIO_VR41XX + tristate "NEC VR4100 series General-purpose I/O Unit support" + depends on CPU_VR41XX + help + Say yes here to support the NEC VR4100 series General-purpose I/O Unit + comment "I2C GPIO expanders:" config GPIO_MAX732X diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index ef90203e8f3..9244c6fcd8b 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o +obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index aa8e7cb020d..4ee4c8367a3 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value) writeb(!!value << offset, chip->base + (1 << (offset + 2))); } +static int pl061_to_irq(struct gpio_chip *gc, unsigned offset) +{ + struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); + + if (chip->irq_base == (unsigned) -1) + return -EINVAL; + + return chip->irq_base + offset; +} + /* * PL061 GPIO IRQ */ @@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) desc->chip->ack(irq); list_for_each(ptr, 
chip_list) { unsigned long pending; - int gpio; + int offset; chip = list_entry(ptr, struct pl061_gpio, list); pending = readb(chip->base + GPIOMIS); @@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) if (pending == 0) continue; - for_each_bit(gpio, &pending, PL061_GPIO_NR) - generic_handle_irq(gpio_to_irq(gpio)); + for_each_bit(offset, &pending, PL061_GPIO_NR) + generic_handle_irq(pl061_to_irq(&chip->gc, offset)); } desc->chip->unmask(irq); } @@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) struct pl061_gpio *chip; struct list_head *chip_list; int ret, irq, i; - static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)]; + static DECLARE_BITMAP(init_irq, NR_IRQS); pdata = dev->dev.platform_data; if (pdata == NULL) @@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) chip->gc.direction_output = pl061_direction_output; chip->gc.get = pl061_get_value; chip->gc.set = pl061_set_value; + chip->gc.to_irq = pl061_to_irq; chip->gc.base = pdata->gpio_base; chip->gc.ngpio = PL061_GPIO_NR; chip->gc.label = dev_name(&dev->dev); @@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id) if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); if (chip_list == NULL) { + clear_bit(irq, init_irq); ret = -ENOMEM; goto iounmap; } diff --git a/drivers/char/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c index 54c837288d1..b16c9a8c03f 100644 --- a/drivers/char/vr41xx_giu.c +++ b/drivers/gpio/vr41xx_giu.c @@ -2,8 +2,8 @@ * Driver for NEC VR4100 series General-purpose I/O Unit. * * Copyright (C) 2002 MontaVista Software Inc. - * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> - * Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Author: Yoichi Yuasa <source@mvista.com> + * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,29 +21,25 @@ */ #include <linux/errno.h> #include <linux/fs.h> +#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/types.h> -#include <asm/io.h> #include <asm/vr41xx/giu.h> #include <asm/vr41xx/irq.h> #include <asm/vr41xx/vr41xx.h> -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver"); MODULE_LICENSE("GPL"); -static int major; /* default is dynamic major device number */ -module_param(major, int, 0); -MODULE_PARM_DESC(major, "Major device number"); - #define GIUIOSELL 0x00 #define GIUIOSELH 0x02 #define GIUPIODL 0x04 @@ -76,9 +72,13 @@ MODULE_PARM_DESC(major, "Major device number"); #define GPIO_HAS_OUTPUT_ENABLE 0x0002 #define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100 -static spinlock_t giu_lock; +enum { + GPIO_INPUT, + GPIO_OUTPUT, +}; + +static DEFINE_SPINLOCK(giu_lock); static unsigned long giu_flags; -static unsigned int giu_nr_pins; static void __iomem *giu_base; @@ -89,9 +89,9 @@ static void __iomem *giu_base; #define GIUINT_HIGH_OFFSET 16 #define GIUINT_HIGH_MAX 32 -static inline uint16_t giu_set(uint16_t offset, uint16_t set) 
+static inline u16 giu_set(u16 offset, u16 set) { - uint16_t data; + u16 data; data = giu_read(offset); data |= set; @@ -100,9 +100,9 @@ static inline uint16_t giu_set(uint16_t offset, uint16_t set) return data; } -static inline uint16_t giu_clear(uint16_t offset, uint16_t clear) +static inline u16 giu_clear(u16 offset, u16 clear) { - uint16_t data; + u16 data; data = giu_read(offset); data &= ~clear; @@ -145,7 +145,8 @@ static struct irq_chip giuint_low_irq_chip = { static void ack_giuint_high(unsigned int irq) { - giu_write(GIUINTSTATH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); + giu_write(GIUINTSTATH, + 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); } static void mask_giuint_high(unsigned int irq) @@ -177,7 +178,7 @@ static struct irq_chip giuint_high_irq_chip = { static int giu_get_irq(unsigned int irq) { - uint16_t pendl, pendh, maskl, maskh; + u16 pendl, pendh, maskl, maskh; int i; pendl = giu_read(GIUINTSTATL); @@ -208,14 +209,15 @@ static int giu_get_irq(unsigned int irq) return -EINVAL; } -void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal) +void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, + irq_signal_t signal) { - uint16_t mask; + u16 mask; if (pin < GIUINT_HIGH_OFFSET) { mask = 1 << pin; if (trigger != IRQ_TRIGGER_LEVEL) { - giu_set(GIUINTTYPL, mask); + giu_set(GIUINTTYPL, mask); if (signal == IRQ_SIGNAL_HOLD) giu_set(GIUINTHTSELL, mask); else @@ -237,14 +239,14 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_ } } set_irq_chip_and_handler(GIU_IRQ(pin), - &giuint_low_irq_chip, - handle_edge_irq); + &giuint_low_irq_chip, + handle_edge_irq); } else { giu_clear(GIUINTTYPL, mask); giu_clear(GIUINTHTSELL, mask); set_irq_chip_and_handler(GIU_IRQ(pin), - &giuint_low_irq_chip, - handle_level_irq); + &giuint_low_irq_chip, + handle_level_irq); } giu_write(GIUINTSTATL, mask); } else if (pin < GIUINT_HIGH_MAX) { @@ -272,14 +274,14 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_ } } set_irq_chip_and_handler(GIU_IRQ(pin), - &giuint_high_irq_chip, - handle_edge_irq); + &giuint_high_irq_chip, + handle_edge_irq); } else { giu_clear(GIUINTTYPH, mask); giu_clear(GIUINTHTSELH, mask); set_irq_chip_and_handler(GIU_IRQ(pin), - &giuint_high_irq_chip, - handle_level_irq); + &giuint_high_irq_chip, + handle_level_irq); } giu_write(GIUINTSTATH, mask); } @@ -288,7 +290,7 @@ EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger); void vr41xx_set_irq_level(unsigned int pin, irq_level_t level) { - uint16_t mask; + u16 mask; if (pin < GIUINT_HIGH_OFFSET) { mask = 1 << pin; @@ -308,89 +310,24 @@ void vr41xx_set_irq_level(unsigned int pin, irq_level_t level) } EXPORT_SYMBOL_GPL(vr41xx_set_irq_level); -gpio_data_t vr41xx_gpio_get_pin(unsigned int pin) -{ - uint16_t reg, mask; - - if (pin >= giu_nr_pins) - return GPIO_DATA_INVAL; - - if (pin < 16) { - reg = giu_read(GIUPIODL); - mask = (uint16_t)1 << pin; - } else if (pin < 32) { - reg = giu_read(GIUPIODH); - mask = (uint16_t)1 << (pin - 16); - } else if (pin < 48) { - reg = giu_read(GIUPODATL); - mask = (uint16_t)1 << (pin - 32); - } else { - reg = giu_read(GIUPODATH); - mask = (uint16_t)1 << (pin - 48); - } - - if (reg & mask) - return GPIO_DATA_HIGH; - - return GPIO_DATA_LOW; -} -EXPORT_SYMBOL_GPL(vr41xx_gpio_get_pin); - -int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data) -{ - uint16_t offset, mask, reg; - unsigned long flags; - - if (pin >= giu_nr_pins) - return -EINVAL; - - if (pin < 16) { - offset = GIUPIODL; - mask = 
(uint16_t)1 << pin; - } else if (pin < 32) { - offset = GIUPIODH; - mask = (uint16_t)1 << (pin - 16); - } else if (pin < 48) { - offset = GIUPODATL; - mask = (uint16_t)1 << (pin - 32); - } else { - offset = GIUPODATH; - mask = (uint16_t)1 << (pin - 48); - } - - spin_lock_irqsave(&giu_lock, flags); - - reg = giu_read(offset); - if (data == GPIO_DATA_HIGH) - reg |= mask; - else - reg &= ~mask; - giu_write(offset, reg); - - spin_unlock_irqrestore(&giu_lock, flags); - - return 0; -} -EXPORT_SYMBOL_GPL(vr41xx_gpio_set_pin); - -int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir) +static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir) { - uint16_t offset, mask, reg; + u16 offset, mask, reg; unsigned long flags; - if (pin >= giu_nr_pins) + if (pin >= chip->ngpio) return -EINVAL; if (pin < 16) { offset = GIUIOSELL; - mask = (uint16_t)1 << pin; + mask = 1 << pin; } else if (pin < 32) { offset = GIUIOSELH; - mask = (uint16_t)1 << (pin - 16); + mask = 1 << (pin - 16); } else { if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) { offset = GIUPODATEN; - mask = (uint16_t)1 << (pin - 32); + mask = 1 << (pin - 32); } else { switch (pin) { case 48: @@ -420,11 +357,10 @@ int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir) return 0; } -EXPORT_SYMBOL_GPL(vr41xx_gpio_set_direction); int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull) { - uint16_t reg, mask; + u16 reg, mask; unsigned long flags; if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO) @@ -433,7 +369,7 @@ int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull) if (pin >= 15) return -EINVAL; - mask = (uint16_t)1 << pin; + mask = 1 << pin; spin_lock_irqsave(&giu_lock, flags); @@ -460,146 +396,125 @@ int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull) } EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown); -static ssize_t gpio_read(struct file *file, char __user *buf, size_t len, - loff_t *ppos) +static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin) { - unsigned int pin; - char value = '0'; + u16 reg, mask; - pin = iminor(file->f_path.dentry->d_inode); - if (pin >= giu_nr_pins) - return -EBADF; - - if (vr41xx_gpio_get_pin(pin) == GPIO_DATA_HIGH) - value = '1'; + if (pin >= chip->ngpio) + return -EINVAL; - if (len <= 0) - return -EFAULT; + if (pin < 16) { + reg = giu_read(GIUPIODL); + mask = 1 << pin; + } else if (pin < 32) { + reg = giu_read(GIUPIODH); + mask = 1 << (pin - 16); + } else if (pin < 48) { + reg = giu_read(GIUPODATL); + mask = 1 << (pin - 32); + } else { + reg = giu_read(GIUPODATH); + mask = 1 << (pin - 48); + } - if (put_user(value, buf)) - return -EFAULT; + if (reg & mask) + return 1; - return 1; + return 0; } -static ssize_t gpio_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) +static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin, + int value) { - unsigned int pin; - size_t i; - char c; - int retval = 0; - - pin = iminor(file->f_path.dentry->d_inode); - if (pin >= giu_nr_pins) - return -EBADF; - - for (i = 0; i < len; i++) { - if (get_user(c, data + i)) - return -EFAULT; - - switch (c) { - case '0': - retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_LOW); - break; - case '1': - retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_HIGH); - break; - case 'D': - printk(KERN_INFO "GPIO%d: pull down\n", pin); - retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DOWN); - break; - case 'd': - printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin); - retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE); - break; - case 'I': - 
printk(KERN_INFO "GPIO%d: input\n", pin); - retval = vr41xx_gpio_set_direction(pin, GPIO_INPUT); - break; - case 'O': - printk(KERN_INFO "GPIO%d: output\n", pin); - retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT); - break; - case 'o': - printk(KERN_INFO "GPIO%d: output disable\n", pin); - retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT_DISABLE); - break; - case 'P': - printk(KERN_INFO "GPIO%d: pull up\n", pin); - retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_UP); - break; - case 'p': - printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin); - retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE); - break; - default: - break; - } + u16 offset, mask, reg; + unsigned long flags; + + if (pin >= chip->ngpio) + return; - if (retval < 0) - break; + if (pin < 16) { + offset = GIUPIODL; + mask = 1 << pin; + } else if (pin < 32) { + offset = GIUPIODH; + mask = 1 << (pin - 16); + } else if (pin < 48) { + offset = GIUPODATL; + mask = 1 << (pin - 32); + } else { + offset = GIUPODATH; + mask = 1 << (pin - 48); } - return i; + spin_lock_irqsave(&giu_lock, flags); + + reg = giu_read(offset); + if (value) + reg |= mask; + else + reg &= ~mask; + giu_write(offset, reg); + + spin_unlock_irqrestore(&giu_lock, flags); } -static int gpio_open(struct inode *inode, struct file *file) + +static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { - unsigned int pin; + return giu_set_direction(chip, offset, GPIO_INPUT); +} - cycle_kernel_lock(); - pin = iminor(inode); - if (pin >= giu_nr_pins) - return -EBADF; +static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + vr41xx_gpio_set(chip, offset, value); - return nonseekable_open(inode, file); + return giu_set_direction(chip, offset, GPIO_OUTPUT); } -static int gpio_release(struct inode *inode, struct file *file) +static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { - unsigned int pin; - - pin = iminor(inode); - if (pin >= giu_nr_pins) - return -EBADF; + if (offset >= chip->ngpio) + return -EINVAL; - return 0; + return GIU_IRQ_BASE + offset; } -static const struct file_operations gpio_fops = { - .owner = THIS_MODULE, - .read = gpio_read, - .write = gpio_write, - .open = gpio_open, - .release = gpio_release, +static struct gpio_chip vr41xx_gpio_chip = { + .label = "vr41xx", + .owner = THIS_MODULE, + .direction_input = vr41xx_gpio_direction_input, + .get = vr41xx_gpio_get, + .direction_output = vr41xx_gpio_direction_output, + .set = vr41xx_gpio_set, + .to_irq = vr41xx_gpio_to_irq, }; -static int __devinit giu_probe(struct platform_device *dev) +static int __devinit giu_probe(struct platform_device *pdev) { struct resource *res; unsigned int trigger, i, pin; struct irq_chip *chip; int irq, retval; - switch (dev->id) { + switch (pdev->id) { case GPIO_50PINS_PULLUPDOWN: giu_flags = GPIO_HAS_PULLUPDOWN_IO; - giu_nr_pins = 50; + vr41xx_gpio_chip.ngpio = 50; break; case GPIO_36PINS: - giu_nr_pins = 36; + vr41xx_gpio_chip.ngpio = 36; break; case GPIO_48PINS_EDGE_SELECT: giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT; - giu_nr_pins = 48; + vr41xx_gpio_chip.ngpio = 48; break; default: - printk(KERN_ERR "GIU: unknown ID %d\n", dev->id); + dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id); return -ENODEV; } - res = platform_get_resource(dev, IORESOURCE_MEM, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EBUSY; @@ -607,19 +522,9 @@ static int __devinit giu_probe(struct platform_device *dev) if (!giu_base) return -ENOMEM; - retval = register_chrdev(major, 
"GIU", &gpio_fops); - if (retval < 0) { - iounmap(giu_base); - giu_base = NULL; - return retval; - } - - if (major == 0) { - major = retval; - printk(KERN_INFO "GIU: major number %d\n", major); - } + vr41xx_gpio_chip.dev = &pdev->dev; - spin_lock_init(&giu_lock); + retval = gpiochip_add(&vr41xx_gpio_chip); giu_write(GIUINTENL, 0); giu_write(GIUINTENH, 0); @@ -640,14 +545,14 @@ static int __devinit giu_probe(struct platform_device *dev) } - irq = platform_get_irq(dev, 0); + irq = platform_get_irq(pdev, 0); if (irq < 0 || irq >= nr_irqs) return -EBUSY; return cascade_irq(irq, giu_get_irq); } -static int __devexit giu_remove(struct platform_device *dev) +static int __devexit giu_remove(struct platform_device *pdev) { if (giu_base) { iounmap(giu_base); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c961fe415ae..39b393d38bb 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -81,6 +81,7 @@ config DRM_I830 config DRM_I915 tristate "i915 driver" + depends on AGP_INTEL select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4e89ab08b7b..fe23f29f7cb 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm-$(CONFIG_COMPAT) += drm_ioc32.o obj-$(CONFIG_DRM) += drm.o +obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ obj-$(CONFIG_DRM_RADEON)+= radeon/ @@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_SIS) += sis/ obj-$(CONFIG_DRM_SAVAGE)+= savage/ obj-$(CONFIG_DRM_VIA) +=via/ -obj-$(CONFIG_DRM_TTM) += ttm/ diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8fab7890a36..2f631c75f70 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -258,31 +258,6 @@ void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) EXPORT_SYMBOL(drm_mode_object_find); /** - * drm_crtc_from_fb - find the CRTC structure associated with an fb - * @dev: DRM device - * @fb: framebuffer in question - * - * LOCKING: - * Caller must hold mode_config lock. - * - * Find CRTC in the mode_config structure that matches @fb. - * - * RETURNS: - * Pointer to the CRTC or NULL if it wasn't found. 
- */ -struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, - struct drm_framebuffer *fb) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->fb == fb) - return crtc; - } - return NULL; -} - -/** * drm_framebuffer_init - initialize a framebuffer * @dev: DRM device * @@ -328,11 +303,20 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; + struct drm_mode_set set; + int ret; /* remove from any CRTC */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->fb == fb) - crtc->fb = NULL; + if (crtc->fb == fb) { + /* should turn off the crtc */ + memset(&set, 0, sizeof(struct drm_mode_set)); + set.crtc = crtc; + set.fb = NULL; + ret = crtc->funcs->set_config(&set); + if (ret) + DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); + } } drm_mode_object_put(dev, &fb->base); @@ -1461,7 +1445,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - if (crtc_req->count_connectors > 0 && !mode && !fb) { + if (crtc_req->count_connectors > 0 && (!mode || !fb)) { DRM_DEBUG("Count connectors is %d but no mode or fb set\n", crtc_req->count_connectors); ret = -EINVAL; @@ -1511,7 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, set.mode = mode; set.connectors = connector_set; set.num_connectors = crtc_req->count_connectors; - set.fb =fb; + set.fb = fb; ret = crtc->funcs->set_config(&set); out: diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6f73f1e99d..6aaa2cb2336 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) struct drm_encoder **save_encoders, *new_encoder; struct drm_framebuffer *old_fb = NULL; bool save_enabled; - bool mode_changed = false; - bool fb_changed = false; + bool mode_changed = false; /* if true do a full mode set */ + bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *connector; int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; @@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->crtc->fb == NULL) { DRM_DEBUG("crtc has no fb, full mode set\n"); mode_changed = true; + } else if (set->fb == NULL) { + mode_changed = true; } else if ((set->fb->bits_per_pixel != set->crtc->fb->bits_per_pixel) || set->fb->depth != set->crtc->fb->depth) @@ -1090,6 +1092,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev) if (ret == false) DRM_ERROR("failed to set mode on crtc %p\n", crtc); } + /* disable the unused connectors while restoring the modesetting */ + drm_helper_disable_unused_functions(dev); return 0; } EXPORT_SYMBOL(drm_helper_resume_force_mode); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 2960b6d7345..9903f270e44 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -101,6 +101,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, continue; tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); + if (tmp == NULL) { + ret = -1; + goto fail; + } ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, root, tmp, &drm_debugfs_fops); if (!ent) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7d0835226f6..7f2728bbc16 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -294,10 +294,10 @@ static struct 
drm_display_mode *drm_mode_detailed(struct drm_device *dev, unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; - unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo; - unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf); - unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ if (hactive < 64 || vactive < 64) @@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; - mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; - mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; + mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; + mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; if (quirks & EDID_QUIRK_DETAILED_IN_CM) { mode->width_mm *= 10; @@ -502,12 +502,40 @@ static int add_detailed_info(struct drm_connector *connector, struct detailed_non_pixel *data = &timing->data.other_data; struct drm_display_mode *newmode; - /* EDID up to and including 1.2 may put monitor info here */ - if (edid->version == 1 && edid->revision < 3) - continue; - - /* Detailed mode timing */ - if (timing->pixel_clock) { + /* X server check is version 1.1 or higher */ + if (edid->version == 1 && edid->revision >= 1 && + !timing->pixel_clock) { + /* Other timing or info */ + switch (data->type) { + case EDID_DETAIL_MONITOR_SERIAL: + break; + case EDID_DETAIL_MONITOR_STRING: + break; + case EDID_DETAIL_MONITOR_RANGE: + /* Get monitor range data */ + break; + case EDID_DETAIL_MONITOR_NAME: + break; + case EDID_DETAIL_MONITOR_CPDATA: + break; + case EDID_DETAIL_STD_MODES: + /* Five modes per detailed section */ + for (j = 0; j < 5; i++) { + struct std_timing *std; + struct drm_display_mode *newmode; + + std = &data->data.timings[j]; + newmode = drm_mode_std(dev, std); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; + } + } + break; + default: + break; + } + } else { newmode = drm_mode_detailed(dev, edid, timing, quirks); if (!newmode) continue; @@ -518,38 +546,6 @@ static int add_detailed_info(struct drm_connector *connector, drm_mode_probed_add(connector, newmode); modes++; - continue; - } - - /* Other timing or info */ - switch (data->type) { - case EDID_DETAIL_MONITOR_SERIAL: - break; - case EDID_DETAIL_MONITOR_STRING: - break; - case EDID_DETAIL_MONITOR_RANGE: - /* Get monitor range data */ - break; - case EDID_DETAIL_MONITOR_NAME: - break; - case EDID_DETAIL_MONITOR_CPDATA: - break; - case EDID_DETAIL_STD_MODES: - /* Five modes per detailed section */ - for (j = 0; j 
< 5; i++) { - struct std_timing *std; - struct drm_display_mode *newmode; - - std = &data->data.timings[j]; - newmode = drm_mode_std(dev, std); - if (newmode) { - drm_mode_probed_add(connector, newmode); - modes++; - } - } - break; - default: - break; } } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8104ecaea26..ffe8f4394d5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -134,26 +134,29 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) BUG_ON((size & (PAGE_SIZE - 1)) != 0); obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + goto free; obj->dev = dev; obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); - if (IS_ERR(obj->filp)) { - kfree(obj); - return NULL; - } + if (IS_ERR(obj->filp)) + goto free; kref_init(&obj->refcount); kref_init(&obj->handlecount); obj->size = size; if (dev->driver->gem_init_object != NULL && dev->driver->gem_init_object(obj) != 0) { - fput(obj->filp); - kfree(obj); - return NULL; + goto fput; } atomic_inc(&dev->object_count); atomic_add(obj->size, &dev->object_memory); return obj; +fput: + fput(obj->filp); +free: + kfree(obj); + return NULL; } EXPORT_SYMBOL(drm_gem_object_alloc); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b4a3dbcebe9..f85aaf21e78 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -566,7 +566,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, ret = drm_vblank_get(dev, crtc); if (ret) { - DRM_ERROR("failed to acquire vblank counter, %d\n", ret); + DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); return ret; } seq = drm_vblank_count(dev, crtc); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 54f492a488a..7914097b09c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -566,6 +566,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector) found_it = 1; /* if equal delete the probed mode */ mode->status = pmode->status; + /* Merge type bits together */ + mode->type |= pmode->type; list_del(&pmode->head); drm_mode_destroy(connector->dev, pmode); break; diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 155a5bbce68..55bb8a82d61 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -489,7 +489,7 @@ int drm_put_minor(struct drm_minor **minor_p) */ void drm_put_dev(struct drm_device *dev) { - struct drm_driver *driver = dev->driver; + struct drm_driver *driver; struct drm_map_list *r_list, *list_temp; DRM_DEBUG("\n"); @@ -498,6 +498,7 @@ void drm_put_dev(struct drm_device *dev) DRM_ERROR("cleanup called no dev\n"); return; } + driver = dev->driver; drm_vblank_cleanup(dev); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 85ec31b3ff0..f7a615b80c7 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -22,44 +22,50 @@ #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) #define to_drm_connector(d) container_of(d, struct drm_connector, kdev) +static struct device_type drm_sysfs_device_minor = { + .name = "drm_minor" +}; + /** - * drm_sysfs_suspend - DRM class suspend hook + * drm_class_suspend - DRM class suspend hook * @dev: Linux device to suspend * @state: power state to enter * * Just figures out what the actual struct drm_device associated with * @dev is and calls its suspend hook, if present. 
*/ -static int drm_sysfs_suspend(struct device *dev, pm_message_t state) +static int drm_class_suspend(struct device *dev, pm_message_t state) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->suspend) - return drm_dev->driver->suspend(drm_dev, state); - + if (dev->type == &drm_sysfs_device_minor) { + struct drm_minor *drm_minor = to_drm_minor(dev); + struct drm_device *drm_dev = drm_minor->dev; + + if (drm_minor->type == DRM_MINOR_LEGACY && + !drm_core_check_feature(drm_dev, DRIVER_MODESET) && + drm_dev->driver->suspend) + return drm_dev->driver->suspend(drm_dev, state); + } return 0; } /** - * drm_sysfs_resume - DRM class resume hook + * drm_class_resume - DRM class resume hook * @dev: Linux device to resume * * Just figures out what the actual struct drm_device associated with * @dev is and calls its resume hook, if present. */ -static int drm_sysfs_resume(struct device *dev) +static int drm_class_resume(struct device *dev) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->resume) - return drm_dev->driver->resume(drm_dev); - + if (dev->type == &drm_sysfs_device_minor) { + struct drm_minor *drm_minor = to_drm_minor(dev); + struct drm_device *drm_dev = drm_minor->dev; + + if (drm_minor->type == DRM_MINOR_LEGACY && + !drm_core_check_feature(drm_dev, DRIVER_MODESET) && + drm_dev->driver->resume) + return drm_dev->driver->resume(drm_dev); + } return 0; } @@ -99,8 +105,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name) goto err_out; } - class->suspend = drm_sysfs_suspend; - class->resume = drm_sysfs_resume; + class->suspend = drm_class_suspend; + class->resume = drm_class_resume; err = class_create_file(class, &class_attr_version); if (err) @@ -480,6 +486,7 @@ int drm_sysfs_device_add(struct drm_minor *minor) minor->kdev.class = drm_class; minor->kdev.release = drm_sysfs_device_release; minor->kdev.devt = minor->device; + minor->kdev.type = &drm_sysfs_device_minor; if (minor->type == DRM_MINOR_CONTROL) minor_str = "controlD%d"; else if (minor->type == DRM_MINOR_RENDER) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 51c5a050aa7..30d6b99fb30 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_crt.o \ intel_lvds.o \ intel_bios.o \ + intel_dp.o \ + intel_dp_i2c.o \ intel_hdmi.o \ intel_sdvo.o \ intel_modes.o \ diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index e747ac42fe3..288fc50627e 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -37,7 +37,7 @@ struct intel_dvo_device { /* GPIO register used for i2c bus to control this device */ u32 gpio; int slave_addr; - struct intel_i2c_chan *i2c_bus; + struct i2c_adapter *i2c_bus; const struct intel_dvo_dev_ops *dev_ops; void *dev_priv; @@ -52,7 +52,7 @@ struct intel_dvo_dev_ops { * Returns NULL if the device does not exist. 
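/*
 * Editor's note (illustrative sketch, not part of the patch): the dvo hunks
 * that follow repeat one conversion across ch7017, ch7xxx, ivch, sil164 and
 * tfp410 -- intel_dvo_device now carries a plain struct i2c_adapter, the
 * intel_i2c_chan is recovered with container_of() only where the adapter
 * name is wanted for debug output, and the slave address is taken from
 * dvo->slave_addr instead of the channel.  A register-read helper written
 * against the new interface (hypothetical name, assumes <linux/i2c.h> and
 * the dvo.h declarations above) would look like:
 */
static bool example_dvo_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
	struct i2c_adapter *adapter = dvo->i2c_bus;
	u8 out_buf[2] = { addr, 0 };
	u8 in_buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr = dvo->slave_addr,	/* slave address lives in the dvo now */
			.flags = 0,
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = dvo->slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = in_buf,
		}
	};

	if (i2c_transfer(adapter, msgs, 2) == 2) {
		*ch = in_buf[0];
		return true;
	}
	return false;
}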
*/ bool (*init)(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus); + struct i2c_adapter *i2cbus); /* * Called to allow the output a chance to create properties after the diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 03d4b4973b0..621815b531d 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) { - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) { - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) /** Probes for a CH7017 on the given bus and slave address. */ static bool ch7017_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); struct ch7017_priv *priv; uint8_t val; @@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, if (priv == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = priv; if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) @@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, val != CH7018_DEVICE_ID_VALUE && val != CH7019_DEVICE_ID_VALUE) { DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", - val, i2cbus->adapter.name,i2cbus->slave_addr); + val, i2cbus->adapter.name,dvo->slave_addr); goto fail; } diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index d2fd95dbd03..a9b89628968 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid) static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct ch7xxx_priv *ch7xxx= dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!ch7xxx->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, 
i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!ch7xxx->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } static bool ch7xxx_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the CH7xxx chip on the specified i2c bus */ struct ch7xxx_priv *ch7xxx; @@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, if (ch7xxx == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = ch7xxx; ch7xxx->quiet = true; @@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, name = ch7xxx_get_id(vendor); if (!name) { DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", - vendor, i2cbus->adapter.name, i2cbus->slave_addr); + vendor, adapter->name, dvo->slave_addr); goto out; } @@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, if (device != CH7xxx_DID) { DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", - vendor, i2cbus->adapter.name, i2cbus->slave_addr); + vendor, adapter->name, dvo->slave_addr); goto out; } diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index 0c8d375e8e3..aa176f9921f 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo); static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) { struct ivch_priv *priv = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[1]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 0, }, @@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD | I2C_M_NOSTART, .len = 2, .buf = in_buf, @@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) if (!priv->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) { struct ivch_priv *priv = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, 
struct intel_i2c_chan, adapter); u8 out_buf[3]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 3, .buf = out_buf, @@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) if (!priv->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) /** Probes the given bus and slave address for an ivch */ static bool ivch_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { struct ivch_priv *priv; uint16_t temp; @@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, if (priv == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = priv; priv->quiet = true; diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 033a4bb070b..e1c1f7341e5 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -76,19 +76,20 @@ struct sil164_priv { static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct sil164_priv *sil = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!sil->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct sil164_priv *sil= dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!sil->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) /* Silicon Image 164 driver for chip on i2c bus */ static bool sil164_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the SIL164 chip on the specified i2c bus */ struct sil164_priv *sil; @@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (sil == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = sil; sil->quiet = true; @@ -158,7 
+159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (ch != (SIL164_VID & 0xff)) { DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", - ch, i2cbus->adapter.name, i2cbus->slave_addr); + ch, adapter->name, dvo->slave_addr); goto out; } @@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (ch != (SIL164_DID & 0xff)) { DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", - ch, i2cbus->adapter.name, i2cbus->slave_addr); + ch, adapter->name, dvo->slave_addr); goto out; } sil->quiet = false; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 207fda806eb..9ecc907384e 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -101,19 +101,20 @@ struct tfp410_priv { static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!tfp->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!tfp->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr) /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; @@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo, if (tfp == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", - id, i2cbus->adapter.name, i2cbus->slave_addr); + id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", - id, i2cbus->adapter.name, i2cbus->slave_addr); + id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; diff --git 
a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f112c769d53..50d1f782768 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -846,7 +846,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, return 0; } - printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); + DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); @@ -885,8 +885,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data, * some RAM for the framebuffer at early boot. This code figures out * how much was set aside so we can use it for our own purposes. */ -static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, - unsigned long *preallocated_size) +static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, + uint32_t *preallocated_size) { struct pci_dev *bridge_dev; u16 tmp = 0; @@ -984,10 +984,11 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, return 0; } -static int i915_load_modeset_init(struct drm_device *dev) +static int i915_load_modeset_init(struct drm_device *dev, + unsigned long prealloc_size, + unsigned long agp_size) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long agp_size, prealloc_size; int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; @@ -1002,10 +1003,6 @@ static int i915_load_modeset_init(struct drm_device *dev) if (IS_I965G(dev) || IS_G33(dev)) dev_priv->cursor_needs_physical = false; - ret = i915_probe_agp(dev, &agp_size, &prealloc_size); - if (ret) - goto out; - /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); @@ -1082,6 +1079,44 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) master->driver_priv = NULL; } +static void i915_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 tmp; + + if (!IS_IGD(dev)) + return; + + tmp = I915_READ(CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1098,6 +1133,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) struct drm_i915_private *dev_priv = dev->dev_private; resource_size_t base, size; int ret = 0, mmio_bar = IS_I9XX(dev) ? 
0 : 1; + uint32_t agp_size, prealloc_size; /* i915 has 4 more counters */ dev->counters += 4; @@ -1146,9 +1182,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } + ret = i915_probe_agp(dev, &agp_size, &prealloc_size); + if (ret) + goto out_iomapfree; + + dev_priv->wq = create_workqueue("i915"); + if (dev_priv->wq == NULL) { + DRM_ERROR("Failed to create our workqueue.\n"); + ret = -ENOMEM; + goto out_iomapfree; + } + /* enable GEM by default */ dev_priv->has_gem = 1; + if (prealloc_size > agp_size * 3 / 4) { + DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " + "memory stolen.\n", + prealloc_size / 1024, agp_size / 1024); + DRM_ERROR("Disabling GEM. (try reducing stolen memory or " + "updating the BIOS to fix).\n"); + dev_priv->has_gem = 0; + } + dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ if (IS_G4X(dev) || IS_IGDNG(dev)) { @@ -1162,9 +1218,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); if (ret != 0) - goto out_iomapfree; + goto out_workqueue_free; } + i915_get_mem_freq(dev); + /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. It doesn't appear to function @@ -1180,6 +1238,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) pci_enable_msi(dev->pdev); spin_lock_init(&dev_priv->user_irq_lock); + spin_lock_init(&dev_priv->error_lock); dev_priv->user_irq_refcount = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); @@ -1190,10 +1249,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev); + ret = i915_load_modeset_init(dev, prealloc_size, agp_size); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); - goto out_rmmap; + goto out_workqueue_free; } } @@ -1204,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return 0; +out_workqueue_free: + destroy_workqueue(dev_priv->wq); out_iomapfree: io_mapping_free(dev_priv->mm.gtt_mapping); out_rmmap: @@ -1217,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + destroy_workqueue(dev_priv->wq); + io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 98560e1e899..fc4b68aa2d0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,6 +35,7 @@ #include "drm_pciids.h" #include <linux/console.h> +#include "drm_crtc_helper.h" static unsigned int i915_modeset = -1; module_param_named(modeset, i915_modeset, int, 0400); @@ -57,8 +58,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) struct drm_i915_private *dev_priv = dev->dev_private; if (!dev || !dev_priv) { - printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); - printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); + DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); + DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; } @@ -67,8 +68,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) pci_save_state(dev->pdev); - i915_save_state(dev); - /* If KMS is active, we do the leavevt stuff here */ if 
(drm_core_check_feature(dev, DRIVER_MODESET)) { if (i915_gem_idle(dev)) @@ -77,6 +76,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) drm_irq_uninstall(dev); } + i915_save_state(dev); + intel_opregion_free(dev, 1); if (state.event == PM_EVENT_SUSPEND) { @@ -115,6 +116,10 @@ static int i915_resume(struct drm_device *dev) drm_irq_install(dev); } + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Resume the modeset for every activated CRTC */ + drm_helper_resume_force_mode(dev); + } return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7a84f04e843..5b4f87e5562 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -133,6 +133,22 @@ struct sdvo_device_mapping { u8 initialized; }; +struct drm_i915_error_state { + u32 eir; + u32 pgtbl_er; + u32 pipeastat; + u32 pipebstat; + u32 ipeir; + u32 ipehr; + u32 instdone; + u32 acthd; + u32 instpm; + u32 instps; + u32 instdone1; + u32 seqno; + struct timeval time; +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -203,12 +219,21 @@ typedef struct drm_i915_private { unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; + unsigned int edp_support:1; int lvds_ssc_freq; + int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + unsigned int fsb_freq, mem_freq; + + spinlock_t error_lock; + struct drm_i915_error_state *first_error; + struct work_struct error_work; + struct workqueue_struct *wq; + /* Register state */ u8 saveLBB; u32 saveDSPACNTR; @@ -306,6 +331,17 @@ typedef struct drm_i915_private { u32 saveCURBPOS; u32 saveCURBBASE; u32 saveCURSIZE; + u32 saveDP_B; + u32 saveDP_C; + u32 saveDP_D; + u32 savePIPEA_GMCH_DATA_M; + u32 savePIPEB_GMCH_DATA_M; + u32 savePIPEA_GMCH_DATA_N; + u32 savePIPEB_GMCH_DATA_N; + u32 savePIPEA_DP_LINK_M; + u32 savePIPEB_DP_LINK_M; + u32 savePIPEA_DP_LINK_N; + u32 savePIPEB_DP_LINK_N; struct { struct drm_mm gtt_space; @@ -349,6 +385,9 @@ typedef struct drm_i915_private { */ struct list_head inactive_list; + /** LRU list of objects with fence regs on them. */ + struct list_head fence_list; + /** * List of breadcrumbs associated with GPU requests currently * outstanding. @@ -416,6 +455,9 @@ struct drm_i915_gem_object { /** This object's place on the active/flushing/inactive lists */ struct list_head list; + /** This object's place on the fenced object LRU */ + struct list_head fence_list; + /** * This is set if the object is on the active or flushing lists * (has pending rendering), and is not set if it's on inactive (ready @@ -457,9 +499,6 @@ struct drm_i915_gem_object { */ int fence_reg; - /** Boolean whether this object has a valid gtt offset. 
*/ - int gtt_bound; - /** How many users have pinned this object in GTT space */ int pin_count; @@ -644,6 +683,7 @@ void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); +void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); @@ -857,7 +897,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) +#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) +/* dsparb controlled by hw only */ +#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd2b8bdffe3..80e5ba490dc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -978,6 +978,7 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_set_domain *args = data; struct drm_gem_object *obj; uint32_t read_domains = args->read_domains; @@ -1006,12 +1007,22 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); #if WATCH_BUF - DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", + DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", obj, obj->size, read_domains, write_domain); #endif if (read_domains & I915_GEM_DOMAIN_GTT) { + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); + /* Update the LRU on the fence for the CPU access that's + * about to occur. + */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + list_move_tail(&obj_priv->fence_list, + &dev_priv->mm.fence_list); + } + /* Silently promote "you're not bound, there was nothing to do" * to success, since the client was just asking us to * make sure everything was done. @@ -1050,7 +1061,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, } #if WATCH_BUF - DRM_INFO("%s: sw_finish %d (%p %d)\n", + DRM_INFO("%s: sw_finish %d (%p %zd)\n", __func__, args->handle, obj, obj->size); #endif obj_priv = obj->driver_private; @@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* Need a new fence register? */ - if (obj_priv->fence_reg == I915_FENCE_REG_NONE && - obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj); if (ret) { mutex_unlock(&dev->struct_mutex); @@ -1252,6 +1262,31 @@ out_free_list: return ret; } +/** + * i915_gem_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmaping with the DRM core code, but + * relinquish ownership of the pages back to the system. + * + * It is vital that we remove the page mapping if we have mapped a tiled + * object through the GTT and then lose the fence register due to + * resource pressure. 
Similarly if the object has been moved out of the + * aperture, than pages mapped into userspace must be revoked. Removing the + * mapping will then trigger a page fault on the next user access, allowing + * fixup by i915_gem_fault(). + */ +void +i915_gem_release_mmap(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (dev->dev_mapping) + unmap_mapping_range(dev->dev_mapping, + obj_priv->mmap_offset, obj->size, 1); +} + static void i915_gem_free_mmap_offset(struct drm_gem_object *obj) { @@ -1545,7 +1580,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, } if (was_empty && !dev_priv->mm.suspended) - schedule_delayed_work(&dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); return seqno; } @@ -1694,7 +1729,7 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && !list_empty(&dev_priv->mm.request_list)) - schedule_delayed_work(&dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -1861,7 +1896,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; - loff_t offset; int ret = 0; #if WATCH_BUF @@ -1898,9 +1932,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->active); /* blow away mappings if mapped through GTT */ - offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; - if (dev->dev_mapping) - unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); + i915_gem_release_mmap(obj); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); @@ -2186,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) struct drm_i915_gem_object *old_obj_priv = NULL; int i, ret, avail; + /* Just update our place in the LRU if our fence is getting used. */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); + return 0; + } + switch (obj_priv->tiling_mode) { case I915_TILING_NONE: WARN(1, "allocating a fence for non-tiled object?\n"); @@ -2207,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) } /* First try to find a free reg */ -try_again: avail = 0; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { reg = &dev_priv->fence_regs[i]; @@ -2221,67 +2258,62 @@ try_again: /* None available, try to steal one or wait for a user to finish */ if (i == dev_priv->num_fence_regs) { - uint32_t seqno = dev_priv->mm.next_gem_seqno; - loff_t offset; + struct drm_gem_object *old_obj = NULL; if (avail == 0) return -ENOSPC; - for (i = dev_priv->fence_reg_start; - i < dev_priv->num_fence_regs; i++) { - uint32_t this_seqno; - - reg = &dev_priv->fence_regs[i]; - old_obj_priv = reg->obj->driver_private; + list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list, + fence_list) { + old_obj = old_obj_priv->obj; if (old_obj_priv->pin_count) continue; + /* Take a reference, as otherwise the wait_rendering + * below may cause the object to get freed out from + * under us. 
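/*
 * Editor's note (illustrative sketch, not part of the patch): fence registers
 * are now kept on an LRU.  Every path that relies on an object's fence -- the
 * GTT set-domain ioctl, the GTT fault handler and get_fence_reg itself --
 * bumps the object to the tail of dev_priv->mm.fence_list, so the steal path
 * here always evicts the least recently used, unpinned fence and zaps that
 * object's GTT mapping so its next access refaults through i915_gem_fault().
 * A hypothetical helper for the "bump on use" half of the scheme:
 */
static void example_mark_fence_used(struct drm_device *dev,
				    struct drm_i915_gem_object *obj_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Objects without a fence have nothing to reorder */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return;

	/* Most recently used objects live at the tail of the list */
	list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
}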
+ */ + drm_gem_object_reference(old_obj); + /* i915 uses fences for GPU access to tiled buffers */ if (IS_I965G(dev) || !old_obj_priv->active) break; - /* find the seqno of the first available fence */ - this_seqno = old_obj_priv->last_rendering_seqno; - if (this_seqno != 0 && - reg->obj->write_domain == 0 && - i915_seqno_passed(seqno, this_seqno)) - seqno = this_seqno; - } - - /* - * Now things get ugly... we have to wait for one of the - * objects to finish before trying again. - */ - if (i == dev_priv->num_fence_regs) { - if (seqno == dev_priv->mm.next_gem_seqno) { - i915_gem_flush(dev, - I915_GEM_GPU_DOMAINS, - I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, NULL, - I915_GEM_GPU_DOMAINS); - if (seqno == 0) - return -ENOMEM; + /* This brings the object to the head of the LRU if it + * had been written to. The only way this should + * result in us waiting longer than the expected + * optimal amount of time is if there was a + * fence-using buffer later that was read-only. + */ + i915_gem_object_flush_gpu_write_domain(old_obj); + ret = i915_gem_object_wait_rendering(old_obj); + if (ret != 0) { + drm_gem_object_unreference(old_obj); + return ret; } - ret = i915_wait_request(dev, seqno); - if (ret) - return ret; - goto try_again; + break; } /* * Zap this virtual mapping so we can set up a fence again * for this object next time we need it. */ - offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; - if (dev->dev_mapping) - unmap_mapping_range(dev->dev_mapping, offset, - reg->obj->size, 1); + i915_gem_release_mmap(old_obj); + + i = old_obj_priv->fence_reg; + reg = &dev_priv->fence_regs[i]; + old_obj_priv->fence_reg = I915_FENCE_REG_NONE; + list_del_init(&old_obj_priv->fence_list); + + drm_gem_object_unreference(old_obj); } obj_priv->fence_reg = i; + list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); + reg->obj = obj; if (IS_I965G(dev)) @@ -2324,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; obj_priv->fence_reg = I915_FENCE_REG_NONE; + list_del_init(&obj_priv->fence_list); } /** @@ -2423,7 +2456,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } #if WATCH_BUF - DRM_INFO("Binding object of size %d at 0x%08x\n", + DRM_INFO("Binding object of size %zd at 0x%08x\n", obj->size, obj_priv->gtt_offset); #endif ret = i915_gem_object_get_pages(obj); @@ -3577,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) * Pre-965 chips need a fence register set up in order to * properly handle tiled surfaces. 
*/ - if (!IS_I965G(dev) && - obj_priv->fence_reg == I915_FENCE_REG_NONE && - obj_priv->tiling_mode != I915_TILING_NONE) { + if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj); if (ret != 0) { if (ret != -EBUSY && ret != -ERESTARTSYS) @@ -3788,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) obj_priv->obj = obj; obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); + INIT_LIST_HEAD(&obj_priv->fence_list); return 0; } @@ -4200,15 +4232,11 @@ int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - int ret; - if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - ret = i915_gem_idle(dev); drm_irq_uninstall(dev); - - return ret; + return i915_gem_idle(dev); } void @@ -4227,6 +4255,7 @@ i915_gem_lastclose(struct drm_device *dev) void i915_gem_load(struct drm_device *dev) { + int i; drm_i915_private_t *dev_priv = dev->dev_private; spin_lock_init(&dev_priv->mm.active_list_lock); @@ -4234,6 +4263,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.request_list); + INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); dev_priv->mm.next_gem_seqno = 1; @@ -4246,6 +4276,18 @@ i915_gem_load(struct drm_device *dev) else dev_priv->num_fence_regs = 8; + /* Initialize fence registers to zero */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); + } else { + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); + } + i915_gem_detect_bit_6_swizzle(dev); } diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 8d0b943e2c5..e602614bd3f 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, chunk_len = page_len - chunk; if (chunk_len > 128) chunk_len = 128; - i915_gem_dump_page(obj_priv->page_list[page], + i915_gem_dump_page(obj_priv->pages[page], chunk, chunk + chunk_len, obj_priv->gtt_offset + page * PAGE_SIZE, @@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) uint32_t *backing_map = NULL; int bad_count = 0; - DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", __func__, obj, obj_priv->gtt_offset, handle, obj->size / 1024); @@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) for (page = 0; page < obj->size / PAGE_SIZE; page++) { int i; - backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); + backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); if (backing_map == NULL) { DRM_ERROR("failed to map backing page\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 28146e405e8..cb3b97405fb 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -75,11 +75,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) case ACTIVE_LIST: seq_printf(m, "Active:\n"); lock = &dev_priv->mm.active_list_lock; - spin_lock(lock); head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: 
- seq_printf(m, "Inctive:\n"); + seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; case FLUSHING_LIST: @@ -91,6 +90,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } + if (lock) + spin_lock(lock); list_for_each_entry(obj_priv, head, list) { struct drm_gem_object *obj = obj_priv->obj; @@ -104,7 +105,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) if (obj->name) seq_printf(m, " (name: %d)", obj->name); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg); + seq_printf(m, " (fence: %d)", obj_priv->fence_reg); + if (obj_priv->gtt_space != NULL) + seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset); + seq_printf(m, "\n"); } @@ -323,6 +327,41 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) return 0; } +static int i915_error_state(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (!dev_priv->first_error) { + seq_printf(m, "no error state collected\n"); + goto out; + } + + error = dev_priv->first_error; + + seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); + seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); + if (IS_I965G(dev)) { + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); + seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + } + +out: + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + + return 0; +} static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, @@ -336,6 +375,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, + {"i915_error_state", i915_error_state, 0}, }; #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 5c1ceec49f5..a2d527b22ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev) mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP if (mchbar_addr && pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { ret = 0; goto out_put; } +#endif /* Get some space for it */ ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, @@ -519,6 +521,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, goto err; } + /* If we've changed tiling, GTT-mappings of the object + * need to re-fault to ensure that the correct fence register + * setup is in place. 
+ */ + i915_gem_release_mmap(obj); + obj_priv->tiling_mode = args->tiling_mode; obj_priv->stride = args->stride; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b86b7b7130c..7ebc84c2881 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -26,6 +26,7 @@ * */ +#include <linux/sysrq.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -41,9 +42,10 @@ * we leave them always unmasked in IMR and then control enabling them through * PIPESTAT alone. */ -#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) +#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) /** Interrupts that we mask and unmask at runtime. */ #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) @@ -188,7 +190,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; if (!i915_pipe_enabled(dev, pipe)) { - DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); + DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); return 0; } @@ -217,7 +219,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; if (!i915_pipe_enabled(dev, pipe)) { - DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); + DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); return 0; } @@ -232,7 +234,17 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, hotplug_work); struct drm_device *dev = dev_priv->dev; - + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_output *intel_output = to_intel_output(connector); + + if (intel_output->hot_plug) + (*intel_output->hot_plug) (intel_output); + } + } /* Just fire off a uevent and let userspace tell us what to do */ drm_sysfs_hotplug_event(dev); } @@ -278,6 +290,201 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) return ret; } +/** + * i915_error_work_func - do process context error handling work + * @work: work struct + * + * Fire an error uevent so userspace can see that a hang or error + * was detected. + */ +static void i915_error_work_func(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + error_work); + struct drm_device *dev = dev_priv->dev; + char *event_string = "ERROR=1"; + char *envp[] = { event_string, NULL }; + + DRM_DEBUG("generating error event\n"); + + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. 
+ */ +static void i915_capture_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (dev_priv->first_error) + goto out; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG("out ot memory, not capturing error state\n"); + goto out; + } + + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + error->pipeastat = I915_READ(PIPEASTAT); + error->pipebstat = I915_READ(PIPEBSTAT); + error->instpm = I915_READ(INSTPM); + if (!IS_I965G(dev)) { + error->ipeir = I915_READ(IPEIR); + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); + } else { + error->ipeir = I915_READ(IPEIR_I965); + error->ipehr = I915_READ(IPEHR_I965); + error->instdone = I915_READ(INSTDONE_I965); + error->instps = I915_READ(INSTPS); + error->instdone1 = I915_READ(INSTDONE1); + error->acthd = I915_READ(ACTHD_I965); + } + + do_gettimeofday(&error->time); + + dev_priv->first_error = error; + +out: + spin_unlock_irqrestore(&dev_priv->error_lock, flags); +} + +/** + * i915_handle_error - handle an error interrupt + * @dev: drm device + * + * Do some basic checking of regsiter state at error interrupt time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). + */ +static void i915_handle_error(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 eir = I915_READ(EIR); + u32 pipea_stats = I915_READ(PIPEASTAT); + u32 pipeb_stats = I915_READ(PIPEBSTAT); + + i915_capture_error_state(dev); + + printk(KERN_ERR "render error detected, EIR: 0x%08x\n", + eir); + + if (IS_G4X(dev)) { + if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { + u32 ipeir = I915_READ(IPEIR_I965); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR_I965)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + I915_READ(IPEHR_I965)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); + printk(KERN_ERR " INSTPS: 0x%08x\n", + I915_READ(INSTPS)); + printk(KERN_ERR " INSTDONE1: 0x%08x\n", + I915_READ(INSTDONE1)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + (void)I915_READ(IPEIR_I965); + } + if (eir & GM45_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printk(KERN_ERR "page table error\n"); + printk(KERN_ERR " PGTBL_ER: 0x%08x\n", + pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + (void)I915_READ(PGTBL_ER); + } + } + + if (IS_I9XX(dev)) { + if (eir & I915_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printk(KERN_ERR "page table error\n"); + printk(KERN_ERR " PGTBL_ER: 0x%08x\n", + pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + (void)I915_READ(PGTBL_ER); + } + } + + if (eir & I915_ERROR_MEMORY_REFRESH) { + printk(KERN_ERR "memory refresh error\n"); + printk(KERN_ERR "PIPEASTAT: 0x%08x\n", + pipea_stats); + printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", + pipeb_stats); + /* pipestat has already been acked */ + } + if (eir & I915_ERROR_INSTRUCTION) { + printk(KERN_ERR "instruction error\n"); + printk(KERN_ERR " INSTPM: 0x%08x\n", + I915_READ(INSTPM)); + if (!IS_I965G(dev)) { + u32 ipeir = I915_READ(IPEIR); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + 
I915_READ(IPEHR)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD)); + I915_WRITE(IPEIR, ipeir); + (void)I915_READ(IPEIR); + } else { + u32 ipeir = I915_READ(IPEIR_I965); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR_I965)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + I915_READ(IPEHR_I965)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); + printk(KERN_ERR " INSTPS: 0x%08x\n", + I915_READ(INSTPS)); + printk(KERN_ERR " INSTDONE1: 0x%08x\n", + I915_READ(INSTDONE1)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + (void)I915_READ(IPEIR_I965); + } + } + + I915_WRITE(EIR, eir); + (void)I915_READ(EIR); + eir = I915_READ(EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + } + + queue_work(dev_priv->wq, &dev_priv->error_work); +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -319,15 +526,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) pipea_stats = I915_READ(PIPEASTAT); pipeb_stats = I915_READ(PIPEBSTAT); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev); + /* * Clear the PIPE(A|B)STAT regs before the IIR */ if (pipea_stats & 0x8000ffff) { + if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG("pipe a underrun\n"); I915_WRITE(PIPEASTAT, pipea_stats); irq_received = 1; } if (pipeb_stats & 0x8000ffff) { + if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG("pipe b underrun\n"); I915_WRITE(PIPEBSTAT, pipeb_stats); irq_received = 1; } @@ -346,7 +560,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) DRM_DEBUG("hotplug event received, stat 0x%08x\n", hotplug_status); if (hotplug_status & dev_priv->hotplug_supported_mask) - schedule_work(&dev_priv->hotplug_work); + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); I915_READ(PORT_HOTPLUG_STAT); @@ -699,6 +914,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) atomic_set(&dev_priv->irq_received, 0); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); if (IS_IGDNG(dev)) { igdng_irq_preinstall(dev); @@ -722,6 +938,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + u32 error_mask; DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); @@ -758,6 +975,21 @@ int i915_driver_irq_postinstall(struct drm_device *dev) i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); } + /* + * Enable some error detection, note the instruction error mask + * bit is reserved, so we leave it masked. 
+ */ + if (IS_G4X(dev)) { + error_mask = ~(GM45_ERROR_PAGE_TABLE | + GM45_ERROR_MEM_PRIV | + GM45_ERROR_CP_PRIV | + I915_ERROR_MEMORY_REFRESH); + } else { + error_mask = ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); + } + I915_WRITE(EMR, error_mask); + /* Disable pipe interrupt enables, clear pending pipe status */ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f6237a0b113..2955083aa47 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -206,6 +206,7 @@ /* * Instruction and interrupt control regs */ +#define PGTBL_ER 0x02024 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 @@ -226,11 +227,18 @@ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ +#define IPEIR_I965 0x02064 +#define IPEHR_I965 0x02068 +#define INSTDONE_I965 0x0206c +#define INSTPS 0x02070 /* 965+ only */ +#define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define IPEIR 0x02088 +#define IPEHR 0x0208c +#define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 #define SCPD0 0x0209c /* 915+ only */ @@ -258,10 +266,22 @@ #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 +#define GM45_ERROR_PAGE_TABLE (1<<5) +#define GM45_ERROR_MEM_PRIV (1<<4) +#define I915_ERROR_PAGE_TABLE (1<<4) +#define GM45_ERROR_CP_PRIV (1<<3) +#define I915_ERROR_MEMORY_REFRESH (1<<1) +#define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define ACTHD 0x020c8 #define FW_BLC 0x020d8 +#define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ +#define FW_BLC_SELF_EN (1<<15) +#define MM_BURST_LENGTH 0x00700000 +#define MM_FIFO_WATERMARK 0x0001F000 +#define LM_BURST_LENGTH 0x00000700 +#define LM_FIFO_WATERMARK 0x0000001F #define MI_ARB_STATE 0x020e4 /* 915+ only */ #define CACHE_MODE_0 0x02120 /* 915+ only */ #define CM0_MASK_SHIFT 16 @@ -569,6 +589,23 @@ #define C0DRB3 0x10206 #define C1DRB3 0x10606 +/* Clocking configuration register */ +#define CLKCFG 0x10c00 +#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ +#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ +#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ +#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ +#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ +#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ +/* Note, below two are guess */ +#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_MASK (7 << 0) +#define CLKCFG_MEM_533 (1 << 4) +#define CLKCFG_MEM_667 (2 << 4) +#define CLKCFG_MEM_800 (3 << 4) +#define CLKCFG_MEM_MASK (7 << 4) + /** GM965 GM45 render standby register */ #define MCHBAR_RENDER_STANDBY 0x111B8 @@ -834,9 +871,25 @@ #define HORIZ_INTERP_MASK (3 << 6) #define HORIZ_AUTO_SCALE (1 << 5) #define PANEL_8TO6_DITHER_ENABLE (1 << 3) +#define PFIT_FILTER_FUZZY (0 << 24) +#define PFIT_SCALING_AUTO (0 << 26) +#define PFIT_SCALING_PROGRAMMED (1 << 26) +#define PFIT_SCALING_PILLAR (2 << 26) +#define PFIT_SCALING_LETTER (3 << 26) #define PFIT_PGM_RATIOS 0x61234 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 +/* Pre-965 */ +#define PFIT_VERT_SCALE_SHIFT 20 +#define PFIT_VERT_SCALE_MASK 0xfff00000 +#define PFIT_HORIZ_SCALE_SHIFT 4 +#define 
PFIT_HORIZ_SCALE_MASK 0x0000fff0 +/* 965+ */ +#define PFIT_VERT_SCALE_SHIFT_965 16 +#define PFIT_VERT_SCALE_MASK_965 0x1fff0000 +#define PFIT_HORIZ_SCALE_SHIFT_965 0 +#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff + #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ @@ -1342,6 +1395,7 @@ #define TV_V_CHROMA_42 0x684a8 /* Display Port */ +#define DP_A 0x64000 /* eDP */ #define DP_B 0x64100 #define DP_C 0x64200 #define DP_D 0x64300 @@ -1384,13 +1438,22 @@ /* Mystic DPCD version 1.1 special mode */ #define DP_ENHANCED_FRAMING (1 << 18) +/* eDP */ +#define DP_PLL_FREQ_270MHZ (0 << 16) +#define DP_PLL_FREQ_160MHZ (1 << 16) +#define DP_PLL_FREQ_MASK (3 << 16) + /** locked once port is enabled */ #define DP_PORT_REVERSAL (1 << 15) +/* eDP */ +#define DP_PLL_ENABLE (1 << 14) + /** sends the clock on lane 15 of the PEG for debug */ #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) #define DP_SCRAMBLING_DISABLE (1 << 12) +#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) /** limit RGB values to avoid confusing TVs */ #define DP_COLOR_RANGE_16_235 (1 << 8) @@ -1410,6 +1473,13 @@ * is 20 bytes in each direction, hence the 5 fixed * data registers */ +#define DPA_AUX_CH_CTL 0x64010 +#define DPA_AUX_CH_DATA1 0x64014 +#define DPA_AUX_CH_DATA2 0x64018 +#define DPA_AUX_CH_DATA3 0x6401c +#define DPA_AUX_CH_DATA4 0x64020 +#define DPA_AUX_CH_DATA5 0x64024 + #define DPB_AUX_CH_CTL 0x64110 #define DPB_AUX_CH_DATA1 0x64114 #define DPB_AUX_CH_DATA2 0x64118 @@ -1552,6 +1622,34 @@ #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) #define DSPARB_BSTART_SHIFT 0 +#define DSPARB_BEND_SHIFT 9 /* on 855 */ +#define DSPARB_AEND_SHIFT 0 + +#define DSPFW1 0x70034 +#define DSPFW2 0x70038 +#define DSPFW3 0x7003c +#define IGD_SELF_REFRESH_EN (1<<30) + +/* FIFO watermark sizes etc */ +#define I915_FIFO_LINE_SIZE 64 +#define I830_FIFO_LINE_SIZE 32 +#define I945_FIFO_SIZE 127 /* 945 & 965 */ +#define I915_FIFO_SIZE 95 +#define I855GM_FIFO_SIZE 127 /* In cachelines */ +#define I830_FIFO_SIZE 95 +#define I915_MAX_WM 0x3f + +#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ +#define IGD_FIFO_LINE_SIZE 64 +#define IGD_MAX_WM 0x1ff +#define IGD_DFT_WM 0x3f +#define IGD_DFT_HPLLOFF_WM 0 +#define IGD_GUARD_WM 10 +#define IGD_CURSOR_FIFO 64 +#define IGD_CURSOR_MAX_WM 0x3f +#define IGD_CURSOR_DFT_WM 0 +#define IGD_CURSOR_GUARD_WM 5 + /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. 
The following code @@ -1767,6 +1865,8 @@ #define PFA_CTL_1 0x68080 #define PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) +#define PFA_WIN_SZ 0x68074 +#define PFB_WIN_SZ 0x68874 /* legacy palette */ #define LGC_PALETTE_A 0x4a000 @@ -2127,4 +2227,28 @@ #define PCH_PP_OFF_DELAYS 0xc720c #define PCH_PP_DIVISOR 0xc7210 +#define PCH_DP_B 0xe4100 +#define PCH_DPB_AUX_CH_CTL 0xe4110 +#define PCH_DPB_AUX_CH_DATA1 0xe4114 +#define PCH_DPB_AUX_CH_DATA2 0xe4118 +#define PCH_DPB_AUX_CH_DATA3 0xe411c +#define PCH_DPB_AUX_CH_DATA4 0xe4120 +#define PCH_DPB_AUX_CH_DATA5 0xe4124 + +#define PCH_DP_C 0xe4200 +#define PCH_DPC_AUX_CH_CTL 0xe4210 +#define PCH_DPC_AUX_CH_DATA1 0xe4214 +#define PCH_DPC_AUX_CH_DATA2 0xe4218 +#define PCH_DPC_AUX_CH_DATA3 0xe421c +#define PCH_DPC_AUX_CH_DATA4 0xe4220 +#define PCH_DPC_AUX_CH_DATA5 0xe4224 + +#define PCH_DP_D 0xe4300 +#define PCH_DPD_AUX_CH_CTL 0xe4310 +#define PCH_DPD_AUX_CH_DATA1 0xe4314 +#define PCH_DPD_AUX_CH_DATA2 0xe4318 +#define PCH_DPD_AUX_CH_DATA3 0xe431c +#define PCH_DPD_AUX_CH_DATA4 0xe4320 +#define PCH_DPD_AUX_CH_DATA5 0xe4324 + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a98e2831ed3..1d04e1904ac 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -222,23 +222,12 @@ static void i915_restore_vga(struct drm_device *dev) I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } -int i915_save_state(struct drm_device *dev) +static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - - /* Render Standby */ - if (IS_I965G(dev) && IS_MOBILE(dev)) - dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); - - /* Hardware status page */ - dev_priv->saveHWS = I915_READ(HWS_PGA); - - /* Display arbitration control */ - dev_priv->saveDSPARB = I915_READ(DSPARB); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); dev_priv->savePIPEASRC = I915_READ(PIPEASRC); @@ -294,7 +283,122 @@ int i915_save_state(struct drm_device *dev) } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + return; +} +static void i915_restore_modeset_reg(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPA0, dev_priv->saveFPA0); + I915_WRITE(FPA1, dev_priv->saveFPA1); + /* Actually enable it */ + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); + + /* Restore plane info */ + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(DSPASTRIDE, 
dev_priv->saveDSPASTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + } + + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); + + /* Pipe & plane B info */ + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPB0, dev_priv->saveFPB0); + I915_WRITE(FPB1, dev_priv->saveFPB1); + /* Actually enable it */ + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + + /* Restore plane info */ + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + } + + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + + i915_restore_palette(dev, PIPE_B); + /* Enable the plane */ + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + return; +} +int i915_save_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); + + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + + /* Display arbitration control */ + dev_priv->saveDSPARB = I915_READ(DSPARB); + + /* This is only meaningful in non-KMS mode */ + /* Don't save them in KMS mode */ + i915_save_modeset_reg(dev); /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(CURACNTR); dev_priv->saveCURAPOS = I915_READ(CURAPOS); @@ -322,6 +426,20 @@ int i915_save_state(struct drm_device *dev) dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + dev_priv->saveDP_B = I915_READ(DP_B); + dev_priv->saveDP_C = I915_READ(DP_C); + dev_priv->saveDP_D = I915_READ(DP_D); + dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); + dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); + dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); + dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); + dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); + dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); + dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); + dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); + } /* FIXME: save TV & SDVO state */ /* FBC state */ @@ -404,92 +522,21 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); } - - /* Pipe & 
plane A info */ - /* Prime the clock */ - if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & - ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); + + /* Display port ratios (must be done before clock is set) */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); + I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); + I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); + I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); + I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); + I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); + I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); + I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } - I915_WRITE(FPA0, dev_priv->saveFPA0); - I915_WRITE(FPA1, dev_priv->saveFPA1); - /* Actually enable it */ - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); - DRM_UDELAY(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); - DRM_UDELAY(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); - - /* Restore plane info */ - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); - I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); - I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); - } - - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); - - i915_restore_palette(dev, PIPE_A); - /* Enable the plane */ - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); - I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); - - /* Pipe & plane B info */ - if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & - ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); - } - I915_WRITE(FPB0, dev_priv->saveFPB0); - I915_WRITE(FPB1, dev_priv->saveFPB1); - /* Actually enable it */ - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); - DRM_UDELAY(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - DRM_UDELAY(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); - - /* Restore plane info */ - I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); - I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); - } - - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); - - i915_restore_palette(dev, PIPE_B); - /* Enable the plane */ - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); - I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); - + /* This is only meaningful in non-KMS mode */ + /* Don't restore them in KMS mode */ + i915_restore_modeset_reg(dev); /* Cursor state */ 
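(Illustrative aside on the restore ordering above: the reworked resume path writes the DisplayPort data/link M and N ratios before the pixel clock is set, and brings each DPLL up in two steps, first programmed with the VCO bit cleared, then the FP dividers, then the full saved value followed by a settling delay. Below is a minimal standalone C sketch of that ordering; write_reg(), delay_us(), the saved_pipe structure and the register values in main() are hypothetical stand-ins for illustration, not the driver's own API.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for I915_WRITE()/DRM_UDELAY() used in the patch. */
static void write_reg(const char *name, uint32_t val)
{
	printf("write %-12s = 0x%08" PRIx32 "\n", name, val);
}
static void delay_us(unsigned int us) { (void)us; /* real code busy-waits */ }

#define DPLL_VCO_ENABLE (1u << 31)

struct saved_pipe {
	uint32_t dp_data_m, dp_data_n;	/* DP data M/N ratios            */
	uint32_t fp0, fp1;		/* divisor registers             */
	uint32_t dpll;			/* full DPLL value incl. VCO bit */
};

/* Restore one pipe in the order the patch uses: ratios first, then a
 * two-step DPLL bring-up so the VCO locks against the new dividers. */
static void restore_pipe_clocks(const struct saved_pipe *s)
{
	write_reg("GMCH_DATA_M", s->dp_data_m);	/* before the clock is set */
	write_reg("GMCH_DATA_N", s->dp_data_n);

	if (s->dpll & DPLL_VCO_ENABLE) {	/* prime: VCO still off */
		write_reg("DPLL", s->dpll & ~DPLL_VCO_ENABLE);
		delay_us(150);
	}
	write_reg("FP0", s->fp0);
	write_reg("FP1", s->fp1);
	write_reg("DPLL", s->dpll);		/* actually enable it */
	delay_us(150);				/* let the PLL settle */
}

int main(void)
{
	/* Arbitrary example values, not taken from any real machine. */
	struct saved_pipe a = { 0x00200024, 0x00800000, 0x00020e08,
				0x00020e08, DPLL_VCO_ENABLE | 0x04000400 };
	restore_pipe_clocks(&a);
	return 0;
}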
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); @@ -518,6 +565,12 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(DP_B, dev_priv->saveDP_B); + I915_WRITE(DP_C, dev_priv->saveDP_C); + I915_WRITE(DP_D, dev_priv->saveDP_D); + } /* FIXME: restore TV & SDVO state */ /* FBC info */ @@ -545,7 +598,7 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 16; i++) { I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); - I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index cdd126d068a..f806fcc54e0 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -59,6 +59,16 @@ find_section(struct bdb_header *bdb, int section_id) return NULL; } +static u16 +get_blocksize(void *p) +{ + u16 *block_ptr, block_size; + + block_ptr = (u16 *)((char *)p - 2); + block_size = *block_ptr; + return block_size; +} + static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, struct lvds_dvo_timing *dvo_timing) @@ -99,9 +109,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, { struct bdb_lvds_options *lvds_options; struct bdb_lvds_lfp_data *lvds_lfp_data; + struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; struct bdb_lvds_lfp_data_entry *entry; struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; + int lfp_data_size, dvo_timing_offset; /* Defaults if we can't find VBT info */ dev_priv->lvds_dither = 0; @@ -119,10 +131,27 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, if (!lvds_lfp_data) return; + lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); + if (!lvds_lfp_data_ptrs) + return; + dev_priv->lvds_vbt = 1; - entry = &lvds_lfp_data->data[lvds_options->panel_type]; - dvo_timing = &entry->dvo_timing; + lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; + entry = (struct bdb_lvds_lfp_data_entry *) + ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * + lvds_options->panel_type)); + dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; + + /* + * the size of fp_timing varies on the different platform. + * So calculate the DVO timing relative offset in LVDS data + * entry to get the DVO timing entry + */ + dvo_timing = (struct lvds_dvo_timing *) + ((unsigned char *)entry + dvo_timing_offset); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); @@ -185,10 +214,47 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_use_ssc = general->enable_ssc; if (dev_priv->lvds_use_ssc) { - if (IS_I855(dev_priv->dev)) - dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else - dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; + if (IS_I85X(dev_priv->dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 66 : 48; + else + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 
100 : 96; + } + } +} + +static void +parse_general_definitions(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct bdb_general_definitions *general; + const int crt_bus_map_table[] = { + GPIOB, + GPIOA, + GPIOC, + GPIOD, + GPIOE, + GPIOF, + }; + + /* Set sensible defaults in case we can't find the general block + or it is the wrong chipset */ + dev_priv->crt_ddc_bus = -1; + + general = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (general) { + u16 block_size = get_blocksize(general); + if (block_size >= sizeof(*general)) { + int bus_pin = general->crt_ddc_gmbus_pin; + DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); + if ((bus_pin >= 1) && (bus_pin <= 6)) { + dev_priv->crt_ddc_bus = + crt_bus_map_table[bus_pin-1]; + } + } else { + DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", + block_size); } } } @@ -201,7 +267,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, struct bdb_general_definitions *p_defs; struct child_device_config *p_child; int i, child_device_num, count; - u16 block_size, *block_ptr; + u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { @@ -219,8 +285,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, return; } /* get the block size of general definitions */ - block_ptr = (u16 *)((char *)p_defs - 2); - block_size = *block_ptr; + block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); @@ -275,6 +340,25 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, } return; } + +static void +parse_driver_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct drm_device *dev = dev_priv->dev; + struct bdb_driver_features *driver; + + /* set default for chips without eDP */ + if (!SUPPORTS_EDP(dev)) { + dev_priv->edp_support = 0; + return; + } + + driver = find_section(bdb, BDB_DRIVER_FEATURES); + if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp_support = 1; +} + /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -322,9 +406,12 @@ intel_init_bios(struct drm_device *dev) /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); + parse_general_definitions(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); + parse_driver_features(dev_priv, bdb); + pci_unmap_rom(pdev, bios); return 0; diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index fe72e1c225d..0f8e5f69ac7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -381,6 +381,51 @@ struct bdb_sdvo_lvds_options { } __attribute__((packed)); +#define BDB_DRIVER_FEATURE_NO_LVDS 0 +#define BDB_DRIVER_FEATURE_INT_LVDS 1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 +#define BDB_DRIVER_FEATURE_EDP 3 + +struct bdb_driver_features { + u8 boot_dev_algorithm:1; + u8 block_display_switch:1; + u8 allow_display_switch:1; + u8 hotplug_dvo:1; + u8 dual_view_zoom:1; + u8 int15h_hook:1; + u8 sprite_in_clone:1; + u8 primary_lfp_id:1; + + u16 boot_mode_x; + u16 boot_mode_y; + u8 boot_mode_bpp; + u8 boot_mode_refresh; + + u16 enable_lfp_primary:1; + u16 selective_mode_pruning:1; + u16 dual_frequency:1; + u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ + u16 nt_clone_support:1; + u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ + u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ + u16 cui_aspect_scaling:1; + u16 
preserve_aspect_ratio:1; + u16 sdvo_device_power_down:1; + u16 crt_hotplug:1; + u16 lvds_config:2; + u16 tv_hotplug:1; + u16 hdmi_config:2; + + u8 static_display:1; + u8 reserved2:7; + u16 legacy_crt_max_x; + u16 legacy_crt_max_y; + u8 legacy_crt_max_refresh; + + u8 hdmi_termination; + u8 custom_vbt_version; +} __attribute__((packed)); + bool intel_init_bios(struct drm_device *dev); /* diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 6de97fc6602..590f81c8f59 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -46,7 +46,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) temp = I915_READ(reg); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); - temp |= ADPA_DAC_ENABLE; + temp &= ~ADPA_DAC_ENABLE; switch(mode) { case DRM_MODE_DPMS_ON: @@ -156,6 +156,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) temp = adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_DAC_ENABLE; + I915_WRITE(PCH_ADPA, adpa); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | @@ -169,13 +172,14 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG("pch crt adpa 0x%x", adpa); I915_WRITE(PCH_ADPA, adpa); - /* This might not be needed as not specified in spec...*/ - udelay(1000); + while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) + ; /* Check the status to see if both blue and green are on now */ adpa = I915_READ(PCH_ADPA); - if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == - ADPA_CRT_HOTPLUG_MONITOR_COLOR) + adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; + if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || + (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) ret = true; else ret = false; @@ -428,8 +432,34 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { + int ret; struct intel_output *intel_output = to_intel_output(connector); - return intel_ddc_get_modes(intel_output); + struct i2c_adapter *ddcbus; + struct drm_device *dev = connector->dev; + + + ret = intel_ddc_get_modes(intel_output); + if (ret || !IS_G4X(dev)) + goto end; + + ddcbus = intel_output->ddc_bus; + /* Try to probe digital port for output in DVI-I -> VGA mode. */ + intel_output->ddc_bus = + intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); + + if (!intel_output->ddc_bus) { + intel_output->ddc_bus = ddcbus; + dev_printk(KERN_ERR, &connector->dev->pdev->dev, + "DDC bus registration failed for CRTDDC_D.\n"); + goto end; + } + /* Try to get modes by GPIOD port */ + ret = intel_ddc_get_modes(intel_output); + intel_i2c_destroy(ddcbus); + +end: + return ret; + } static int intel_crt_set_property(struct drm_connector *connector, @@ -478,6 +508,7 @@ void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; struct intel_output *intel_output; + struct drm_i915_private *dev_priv = dev->dev_private; u32 i2c_reg; intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); @@ -497,8 +528,12 @@ void intel_crt_init(struct drm_device *dev) /* Set up the DDC bus. 
*/ if (IS_IGDNG(dev)) i2c_reg = PCH_GPIOA; - else + else { i2c_reg = GPIOA; + /* Use VBT information for CRT DDC if available */ + if (dev_priv->crt_ddc_bus != -1) + i2c_reg = dev_priv->crt_ddc_bus; + } intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); if (!intel_output->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " @@ -507,6 +542,10 @@ void intel_crt_init(struct drm_device *dev) } intel_output->type = INTEL_OUTPUT_ANALOG; + intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); + intel_output->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3e1c7816211..748ed50c55c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -25,14 +25,19 @@ */ #include <linux/i2c.h> +#include <linux/kernel.h> #include "drmP.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_dp.h" #include "drm_crtc_helper.h" +#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + bool intel_pipe_has_type (struct drm_crtc *crtc, int type); +static void intel_update_watermarks(struct drm_device *dev); typedef struct { /* given values */ @@ -85,7 +90,7 @@ struct intel_limit { #define I8XX_P2_SLOW 4 #define I8XX_P2_FAST 2 #define I8XX_P2_LVDS_SLOW 14 -#define I8XX_P2_LVDS_FAST 14 /* No fast option */ +#define I8XX_P2_LVDS_FAST 7 #define I8XX_P2_SLOW_LIMIT 165000 #define I9XX_DOT_MIN 20000 @@ -127,19 +132,6 @@ struct intel_limit { #define I9XX_P2_LVDS_FAST 7 #define I9XX_P2_LVDS_SLOW_LIMIT 112000 -#define INTEL_LIMIT_I8XX_DVO_DAC 0 -#define INTEL_LIMIT_I8XX_LVDS 1 -#define INTEL_LIMIT_I9XX_SDVO_DAC 2 -#define INTEL_LIMIT_I9XX_LVDS 3 -#define INTEL_LIMIT_G4X_SDVO 4 -#define INTEL_LIMIT_G4X_HDMI_DAC 5 -#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 -#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 -#define INTEL_LIMIT_IGD_SDVO_DAC 8 -#define INTEL_LIMIT_IGD_LVDS 9 -#define INTEL_LIMIT_IGDNG_SDVO_DAC 10 -#define INTEL_LIMIT_IGDNG_LVDS 11 - /*The parameter is for SDVO on G4x platform*/ #define G4X_DOT_SDVO_MIN 25000 #define G4X_DOT_SDVO_MAX 270000 @@ -218,6 +210,25 @@ struct intel_limit { #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 +/*The parameter is for DISPLAY PORT on G4x platform*/ +#define G4X_DOT_DISPLAY_PORT_MIN 161670 +#define G4X_DOT_DISPLAY_PORT_MAX 227000 +#define G4X_N_DISPLAY_PORT_MIN 1 +#define G4X_N_DISPLAY_PORT_MAX 2 +#define G4X_M_DISPLAY_PORT_MIN 97 +#define G4X_M_DISPLAY_PORT_MAX 108 +#define G4X_M1_DISPLAY_PORT_MIN 0x10 +#define G4X_M1_DISPLAY_PORT_MAX 0x12 +#define G4X_M2_DISPLAY_PORT_MIN 0x05 +#define G4X_M2_DISPLAY_PORT_MAX 0x06 +#define G4X_P_DISPLAY_PORT_MIN 10 +#define G4X_P_DISPLAY_PORT_MAX 20 +#define G4X_P1_DISPLAY_PORT_MIN 1 +#define G4X_P1_DISPLAY_PORT_MAX 2 +#define G4X_P2_DISPLAY_PORT_SLOW 10 +#define G4X_P2_DISPLAY_PORT_FAST 10 +#define G4X_P2_DISPLAY_PORT_LIMIT 0 + /* IGDNG */ /* as we calculate clock using (register_value + 2) for N/M1/M2, so here the range value for them is (actual_value-2). 
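(Illustrative aside on the VBT parsing above: get_blocksize() reads a section's length from the two bytes stored immediately before its payload, and parse_general_definitions() maps crt_ddc_gmbus_pin values 1..6 onto a GPIO register through crt_bus_map_table, keeping the old GPIOA default when nothing usable is found. The self-contained C sketch below reproduces both steps; the GPIO numeric values and the fake VBT buffer are made up for illustration.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder GPIO register offsets; the real values live in i915_reg.h. */
enum { GPIOA = 0x5010, GPIOB = 0x5014, GPIOC = 0x5018,
       GPIOD = 0x501c, GPIOE = 0x5020, GPIOF = 0x5024 };

/* The block size sits in the two bytes just before the section payload,
 * which is what get_blocksize() in the patch relies on. */
static uint16_t get_blocksize(const void *section)
{
	uint16_t size;

	memcpy(&size, (const uint8_t *)section - 2, sizeof(size));
	return size;
}

/* Map crt_ddc_gmbus_pin (1..6) to a GPIO register, or fall back to GPIOA. */
static int crt_ddc_reg_from_vbt(int bus_pin)
{
	static const int crt_bus_map_table[] = { GPIOB, GPIOA, GPIOC,
						 GPIOD, GPIOE, GPIOF };

	if (bus_pin >= 1 && bus_pin <= 6)
		return crt_bus_map_table[bus_pin - 1];
	return GPIOA;	/* no usable VBT info: keep the old default */
}

int main(void)
{
	/* Fake section: a 2-byte size followed by a 6-byte payload. */
	uint8_t blob[8] = { 6, 0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	const void *section = blob + 2;

	printf("block size = %u\n", get_blocksize(section));
	printf("ddc reg for pin 3 = 0x%x\n", (unsigned)crt_ddc_reg_from_vbt(3));
	printf("ddc reg for pin 0 = 0x%x (fallback)\n",
	       (unsigned)crt_ddc_reg_from_vbt(0));
	return 0;
}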
@@ -256,8 +267,14 @@ static bool intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); -static const intel_limit_t intel_limits[] = { - { /* INTEL_LIMIT_I8XX_DVO_DAC */ +static bool +intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); +static bool +intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); + +static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, @@ -269,8 +286,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I8XX_LVDS */ +}; + +static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, @@ -282,8 +300,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I9XX_SDVO_DAC */ +}; + +static const intel_limit_t intel_limits_i9xx_sdvo = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, @@ -295,8 +314,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I9XX_LVDS */ +}; + +static const intel_limit_t intel_limits_i9xx_lvds = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, @@ -311,9 +331,10 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, .find_pll = intel_find_best_PLL, - }, +}; + /* below parameter and function is for G4X Chipset Family*/ - { /* INTEL_LIMIT_G4X_SDVO */ +static const intel_limit_t intel_limits_g4x_sdvo = { .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, @@ -327,8 +348,9 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_SDVO_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_HDMI_DAC */ +}; + +static const intel_limit_t intel_limits_g4x_hdmi = { .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, @@ -342,8 +364,9 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_HDMI_DAC_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ +}; + +static const intel_limit_t intel_limits_g4x_single_channel_lvds = { .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, .vco = { .min = G4X_VCO_MIN, @@ -365,8 +388,9 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST }, .find_pll = 
intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ +}; + +static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, .vco = { .min = G4X_VCO_MIN, @@ -388,8 +412,32 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_IGD_SDVO */ +}; + +static const intel_limit_t intel_limits_g4x_display_port = { + .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, + .max = G4X_DOT_DISPLAY_PORT_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_DISPLAY_PORT_MIN, + .max = G4X_N_DISPLAY_PORT_MAX }, + .m = { .min = G4X_M_DISPLAY_PORT_MIN, + .max = G4X_M_DISPLAY_PORT_MAX }, + .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, + .max = G4X_M1_DISPLAY_PORT_MAX }, + .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, + .max = G4X_M2_DISPLAY_PORT_MAX }, + .p = { .min = G4X_P_DISPLAY_PORT_MIN, + .max = G4X_P_DISPLAY_PORT_MAX }, + .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, + .max = G4X_P1_DISPLAY_PORT_MAX}, + .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, + .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, + .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, + .find_pll = intel_find_pll_g4x_dp, +}; + +static const intel_limit_t intel_limits_igd_sdvo = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, @@ -401,8 +449,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_IGD_LVDS */ +}; + +static const intel_limit_t intel_limits_igd_lvds = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, @@ -415,8 +464,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ +}; + +static const intel_limit_t intel_limits_igdng_sdvo = { .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, @@ -429,8 +479,9 @@ static const intel_limit_t intel_limits[] = { .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, .find_pll = intel_igdng_find_best_PLL, - }, - { /* INTEL_LIMIT_IGDNG_LVDS */ +}; + +static const intel_limit_t intel_limits_igdng_lvds = { .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, @@ -443,16 +494,15 @@ static const intel_limit_t intel_limits[] = { .p2_slow = IGDNG_P2_LVDS_SLOW, .p2_fast = IGDNG_P2_LVDS_FAST }, .find_pll = intel_igdng_find_best_PLL, - }, }; static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) { const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; + limit = &intel_limits_igdng_lvds; else - limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; + limit = &intel_limits_igdng_sdvo; return limit; } @@ -467,19 +517,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) /* LVDS with dual 
channel */ - limit = &intel_limits - [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; + limit = &intel_limits_g4x_dual_channel_lvds; else /* LVDS with dual channel */ - limit = &intel_limits - [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; + limit = &intel_limits_g4x_single_channel_lvds; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { - limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; + limit = &intel_limits_g4x_hdmi; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { - limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; + limit = &intel_limits_g4x_sdvo; + } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) { + limit = &intel_limits_g4x_display_port; } else /* The option is for other outputs */ - limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + limit = &intel_limits_i9xx_sdvo; return limit; } @@ -495,19 +545,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) limit = intel_g4x_limit(crtc); } else if (IS_I9XX(dev) && !IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; + limit = &intel_limits_i9xx_lvds; else - limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + limit = &intel_limits_i9xx_sdvo; } else if (IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; + limit = &intel_limits_igd_lvds; else - limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; + limit = &intel_limits_igd_sdvo; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; + limit = &intel_limits_i8xx_lvds; else - limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; + limit = &intel_limits_i8xx_dvo; } return limit; } @@ -553,6 +603,23 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) return false; } +struct drm_connector * +intel_pipe_get_output (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *l_entry, *ret = NULL; + + list_for_each_entry(l_entry, &mode_config->connector_list, head) { + if (l_entry->encoder && + l_entry->encoder->crtc == crtc) { + ret = l_entry; + break; + } + } + return ret; +} + #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) /** * Returns whether the given set of divisors are valid for a given refclk with @@ -599,8 +666,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, intel_clock_t clock; int err = target; - if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && - (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + (I915_READ(LVDS)) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. 
We haven't figured out how to @@ -707,6 +774,30 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool +intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + if (target < 200000) { + clock.n = 1; + clock.p1 = 2; + clock.p2 = 10; + clock.m1 = 12; + clock.m2 = 9; + } else { + clock.n = 2; + clock.p1 = 1; + clock.p2 = 10; + clock.m1 = 14; + clock.m2 = 8; + } + intel_clock(dev, refclk, &clock); + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} + +static bool intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { @@ -718,6 +809,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int err_most = 47; found = false; + /* eDP has only 2 clock choice, no n/m/p setting */ + if (HAS_eDP) + return true; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + return intel_find_pll_igdng_dp(limit, crtc, target, + refclk, best_clock); + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) @@ -764,6 +863,32 @@ out: return found; } +/* DisplayPort has only two frequencies, 162MHz and 270MHz */ +static bool +intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + intel_clock_t clock; + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 2; + clock.m1 = 23; + clock.m2 = 8; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 14; + clock.m2 = 2; + } + clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); + clock.p = (clock.p1 * clock.p2); + clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} + void intel_wait_for_vblank(struct drm_device *dev) { @@ -927,13 +1052,97 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +/* Disable the VGA plane that we never use */ +static void i915_disable_vga (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 sr1; + u32 vga_reg; + + if (IS_IGDNG(dev)) + vga_reg = CPU_VGACNTRL; + else + vga_reg = VGACNTRL; + + if (I915_READ(vga_reg) & VGA_DISP_DISABLE) + return; + + I915_WRITE8(VGA_SR_INDEX, 1); + sr1 = I915_READ8(VGA_SR_DATA); + I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); + udelay(100); + + I915_WRITE(vga_reg, VGA_DISP_DISABLE); +} + +static void igdng_disable_pll_edp (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG("\n"); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); +} + +static void igdng_enable_pll_edp (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + dpa_ctl = I915_READ(DP_A); + dpa_ctl |= DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); + udelay(200); +} + + +static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG("eDP PLL enable for clock %d\n", clock); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_FREQ_MASK; + + if (clock < 200000) { + u32 temp; + dpa_ctl |= DP_PLL_FREQ_160MHZ; + /* workaround for 160Mhz: + 1) 
program 0x4600c bits 15:0 = 0x8124 + 2) program 0x46010 bit 0 = 1 + 3) program 0x46034 bit 24 = 1 + 4) program 0x64000 bit 14 = 1 + */ + temp = I915_READ(0x4600c); + temp &= 0xffff0000; + I915_WRITE(0x4600c, temp | 0x8124); + + temp = I915_READ(0x46010); + I915_WRITE(0x46010, temp | 1); + + temp = I915_READ(0x46034); + I915_WRITE(0x46034, temp | (1 << 24)); + } else { + dpa_ctl |= DP_PLL_FREQ_270MHZ; + } + I915_WRITE(DP_A, dpa_ctl); + + udelay(500); +} + static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int plane = intel_crtc->pipe; + int plane = intel_crtc->plane; int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; @@ -944,6 +1153,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; + int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; @@ -957,7 +1167,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; u32 temp; - int tries = 5, j; + int tries = 5, j, n; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
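(Illustrative aside on the DisplayPort clock selection above: intel_find_pll_g4x_dp() does not search the divisor space at all, it picks one of two fixed divisor sets and derives the dot clock, in kHz, as dot = 96000 * m / (n + 2) / p, with m = 5*(m1+2) + (m2+2) and p = p1*p2. The standalone check below redoes that arithmetic; it prints 162000 kHz for the slow set and 268800 kHz for the fast one, the patch's nominal 162 MHz and 270 MHz link rates.)

#include <stdio.h>

struct dp_clock { int n, m1, m2, p1, p2; };

/* Reproduce the dot-clock arithmetic used by intel_find_pll_g4x_dp():
 *   m = 5*(m1+2) + (m2+2), p = p1*p2, dot = 96000 * m / (n+2) / p  (kHz) */
static long dp_dot_clock_khz(struct dp_clock c)
{
	long m = 5L * (c.m1 + 2) + (c.m2 + 2);
	long p = c.p1 * c.p2;

	return 96000L * m / (c.n + 2) / p;
}

int main(void)
{
	/* The two fixed divisor sets from the patch. */
	struct dp_clock slow = { .n = 2, .m1 = 23, .m2 = 8, .p1 = 2, .p2 = 10 };
	struct dp_clock fast = { .n = 1, .m1 = 14, .m2 = 2, .p1 = 1, .p2 = 10 };

	printf("slow set: %ld kHz (162 MHz link)\n", dp_dot_clock_khz(slow));
	printf("fast set: %ld kHz (nominal 270 MHz link)\n",
	       dp_dot_clock_khz(fast));
	return 0;
}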
@@ -967,27 +1177,32 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: DRM_DEBUG("crtc %d dpms on\n", pipe); - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | - FDI_SEL_PCDCLK | - FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ - I915_READ(fdi_rx_reg); - udelay(200); + if (HAS_eDP) { + /* enable eDP PLL */ + igdng_enable_pll_edp(crtc); + } else { + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + } - /* Enable CPU FDI TX PLL, always on for IGDNG */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | + FDI_SEL_PCDCLK | + FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ + I915_READ(fdi_rx_reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for IGDNG */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } } /* Enable CPU pipe */ @@ -1006,122 +1221,126 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } - /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; - temp |= FDI_DP_PORT_WIDTH_X4; /* default */ - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + if (!HAS_eDP) { + /* enable CPU FDI TX and PCH FDI RX */ + temp = I915_READ(fdi_tx_reg); + temp |= FDI_TX_ENABLE; + temp |= FDI_DP_PORT_WIDTH_X4; /* default */ + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + I915_READ(fdi_tx_reg); - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); - udelay(150); + udelay(150); - /* Train FDI. */ - /* umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); + /* Train FDI. 
*/ + /* umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if ((temp & FDI_RX_BIT_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_BIT_LOCK) - break; - udelay(200); - } - if (j != tries) + if ((temp & FDI_RX_BIT_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_BIT_LOCK) + break; + udelay(200); + } + if (j != tries) + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + else + DRM_DEBUG("train 1 fail\n"); + } else { I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_BIT_LOCK); - else - DRM_DEBUG("train 1 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); - DRM_DEBUG("train 1 ok 2!\n"); - } - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_tx_reg, temp); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_rx_reg, temp); + DRM_DEBUG("train 1 ok 2!\n"); + } + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_rx_reg, temp); - udelay(150); + udelay(150); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_SYMBOL_LOCK) - break; - udelay(200); - } - if (j != tries) { + if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_SYMBOL_LOCK) + break; + udelay(200); + } + if (j != tries) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG("train 2 ok 1!\n"); + } else + DRM_DEBUG("train 2 fail\n"); + } else { I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG("train 2 ok 1!\n"); - } else - DRM_DEBUG("train 2 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG("train 2 ok 2!\n"); - } - DRM_DEBUG("train done\n"); + DRM_DEBUG("train 2 ok 2!\n"); + } + DRM_DEBUG("train done\n"); - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); + I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); + I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, 
I915_READ(cpu_vsync_reg)); - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + I915_WRITE(transconf_reg, temp | TRANS_ENABLE); + I915_READ(transconf_reg); - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) - ; + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) + ; - /* enable normal */ + /* enable normal */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); - /* wait one idle pattern time */ - udelay(100); + /* wait one idle pattern time */ + udelay(100); + + } intel_crtc_load_lut(crtc); @@ -1129,8 +1348,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG("crtc %d dpms off\n", pipe); - /* Disable the VGA plane that we never use */ - I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE); + i915_disable_vga(dev); /* Disable display plane */ temp = I915_READ(dspcntr_reg); @@ -1146,17 +1364,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) if ((temp & PIPEACONF_ENABLE) != 0) { I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); I915_READ(pipeconf_reg); + n = 0; /* wait for cpu pipe off, pipe state */ - while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) - ; + while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { + n++; + if (n < 60) { + udelay(500); + continue; + } else { + DRM_DEBUG("pipe %d off delay\n", pipe); + break; + } + } } else DRM_DEBUG("crtc %d is disabled\n", pipe); - /* IGDNG-A : disable cpu panel fitter ? 
*/ - temp = I915_READ(pf_ctl_reg); - if ((temp & PF_ENABLE) != 0) { - I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); - I915_READ(pf_ctl_reg); + if (HAS_eDP) { + igdng_disable_pll_edp(crtc); } /* disable CPU FDI tx and PCH FDI rx */ @@ -1168,6 +1392,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); I915_READ(fdi_rx_reg); + udelay(100); + /* still set train pattern 1 */ temp = I915_READ(fdi_tx_reg); temp &= ~FDI_LINK_TRAIN_NONE; @@ -1179,14 +1405,25 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(fdi_rx_reg, temp); + udelay(100); + /* disable PCH transcoder */ temp = I915_READ(transconf_reg); if ((temp & TRANS_ENABLE) != 0) { I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); I915_READ(transconf_reg); + n = 0; /* wait for PCH transcoder off, transcoder state */ - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) - ; + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { + n++; + if (n < 60) { + udelay(500); + continue; + } else { + DRM_DEBUG("transcoder %d off delay\n", pipe); + break; + } + } } /* disable PCH DPLL */ @@ -1204,6 +1441,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(fdi_rx_reg); } + /* Disable CPU FDI TX PLL */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) != 0) { + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } + + /* Disable PF */ + temp = I915_READ(pf_ctl_reg); + if ((temp & PF_ENABLE) != 0) { + I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); + I915_READ(pf_ctl_reg); + } + I915_WRITE(pf_win_size, 0); + /* Wait for the clocks to turn off. */ udelay(150); break; @@ -1263,13 +1516,15 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) /* Give the overlay scaler a chance to enable if it's on this pipe */ //intel_crtc_dpms_video(crtc, true); TODO + intel_update_watermarks(dev); break; case DRM_MODE_DPMS_OFF: + intel_update_watermarks(dev); /* Give the overlay scaler a chance to disable if it's on this pipe */ //intel_crtc_dpms_video(crtc, FALSE); TODO /* Disable the VGA plane that we never use */ - I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); + i915_disable_vga(dev); /* Disable display plane */ temp = I915_READ(dspcntr_reg); @@ -1443,7 +1698,6 @@ static int intel_get_core_clock_speed(struct drm_device *dev) return 0; /* Silence gcc warning */ } - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1502,7 +1756,7 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, temp = (u64) DATA_N * pixel_clock; temp = div_u64(temp, link_clock); - m_n->gmch_m = (temp * bytes_per_pixel) / nlanes; + m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); m_n->gmch_n = DATA_N; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); @@ -1513,6 +1767,478 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, } +struct intel_watermark_params { + unsigned long fifo_size; + unsigned long max_wm; + unsigned long default_wm; + unsigned long guard_size; + unsigned long cacheline_size; +}; + +/* IGD has different values for various configs */ +static struct intel_watermark_params igd_display_wm = { + IGD_DISPLAY_FIFO, + IGD_MAX_WM, + IGD_DFT_WM, + IGD_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params igd_display_hplloff_wm = { + IGD_DISPLAY_FIFO, + IGD_MAX_WM, + IGD_DFT_HPLLOFF_WM, + IGD_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params igd_cursor_wm = { + 
IGD_CURSOR_FIFO, + IGD_CURSOR_MAX_WM, + IGD_CURSOR_DFT_WM, + IGD_CURSOR_GUARD_WM, + IGD_FIFO_LINE_SIZE, +}; +static struct intel_watermark_params igd_cursor_hplloff_wm = { + IGD_CURSOR_FIFO, + IGD_CURSOR_MAX_WM, + IGD_CURSOR_DFT_WM, + IGD_CURSOR_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i945_wm_info = { + I945_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i915_wm_info = { + I915_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i855_wm_info = { + I855GM_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i830_wm_info = { + I830_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; + +/** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock + * @wm: chip FIFO params + * @pixel_size: display pixel size + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + struct intel_watermark_params *wm, + int pixel_size, + unsigned long latency_ns) +{ + long entries_required, wm_size; + + entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; + entries_required /= wm->cacheline_size; + + DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); + + wm_size = wm->fifo_size - (entries_required + wm->guard_size); + + DRM_DEBUG("FIFO watermark level: %d\n", wm_size); + + /* Don't promote wm_size to unsigned... 
*/ + if (wm_size > (long)wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + return wm_size; +} + +struct cxsr_latency { + int is_desktop; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; + unsigned long display_hpll_disable; + unsigned long cursor_sr; + unsigned long cursor_hpll_disable; +}; + +static struct cxsr_latency cxsr_latency_table[] = { + {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + + {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + + {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + + {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + + {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + + {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ +}; + +static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, + int mem) +{ + int i; + struct cxsr_latency *latency; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + fsb == latency->fsb_freq && mem == latency->mem_freq) + break; + } + if (i >= ARRAY_SIZE(cxsr_latency_table)) { + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); + return NULL; + } + return latency; +} + +static void igd_disable_cxsr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + + /* deactivate cxsr */ + reg = I915_READ(DSPFW3); + reg &= ~(IGD_SELF_REFRESH_EN); + I915_WRITE(DSPFW3, reg); + DRM_INFO("Big FIFO is disabled\n"); +} + +static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + unsigned long wm; + struct cxsr_latency *latency; + + latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, + dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); + igd_disable_cxsr(dev); + return; + } + + /* Display SR */ + wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, + latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= 0x7fffff; + reg |= wm << 23; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, + latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~(0x3f << 24); + reg |= (wm & 0x3f) << 24; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, + latency->display_hpll_disable, I915_FIFO_LINE_SIZE); + reg = I915_READ(DSPFW3); + reg &= 0xfffffe00; + reg |= wm & 0x1ff; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, + 
latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~(0x3f << 16); + reg |= (wm & 0x3f) << 16; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + reg = I915_READ(DSPFW3); + reg |= IGD_SELF_REFRESH_EN; + I915_WRITE(DSPFW3, reg); + + DRM_INFO("Big FIFO is enabled\n"); + + return; +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. + */ +const static int latency_ns = 5000; + +static int intel_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + if (IS_I9XX(dev)) { + if (plane == 0) + size = dsparb & 0x7f; + else + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - + (dsparb & 0x7f); + } else if (IS_I85X(dev)) { + if (plane == 0) + size = dsparb & 0x1ff; + else + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - + (dsparb & 0x1ff); + size >>= 1; /* Convert to cachelines */ + } else if (IS_845G(dev)) { + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + } else { + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + } + + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", + size); + + return size; +} + +static void i965_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); + + /* 965 has limitations... */ + I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); + I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); +} + +static void i9xx_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t fwater_lo; + uint32_t fwater_hi; + int total_size, cacheline_size, cwm, srwm = 1; + int planea_wm, planeb_wm; + struct intel_watermark_params planea_params, planeb_params; + unsigned long line_time_us; + int sr_clock, sr_entries = 0; + + /* Create copies of the base settings for each pipe */ + if (IS_I965GM(dev) || IS_I945GM(dev)) + planea_params = planeb_params = i945_wm_info; + else if (IS_I9XX(dev)) + planea_params = planeb_params = i915_wm_info; + else + planea_params = planeb_params = i855_wm_info; + + /* Grab a couple of global values before we overwrite them */ + total_size = planea_params.fifo_size; + cacheline_size = planea_params.cacheline_size; + + /* Update per-plane FIFO sizes */ + planea_params.fifo_size = intel_get_fifo_size(dev, 0); + planeb_params.fifo_size = intel_get_fifo_size(dev, 1); + + planea_wm = intel_calculate_wm(planea_clock, &planea_params, + pixel_size, latency_ns); + planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, + pixel_size, latency_ns); + DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + /* + * Overlay gets an aggressive default since video jitter is bad. 
+ */ + cwm = 2; + + /* Calc sr entries for one plane configs */ + if (sr_hdisplay && (!planea_clock || !planeb_clock)) { + /* self-refresh has much higher latency */ + const static int sr_latency_ns = 6000; + + sr_clock = planea_clock ? planea_clock : planeb_clock; + line_time_us = ((sr_hdisplay * 1000) / sr_clock); + + /* Use ns/us then divide to preserve precision */ + sr_entries = (((sr_latency_ns / line_time_us) + 1) * + pixel_size * sr_hdisplay) / 1000; + sr_entries = roundup(sr_entries / cacheline_size, 1); + DRM_DEBUG("self-refresh entries: %d\n", sr_entries); + srwm = total_size - sr_entries; + if (srwm < 0) + srwm = 1; + if (IS_I9XX(dev)) + I915_WRITE(FW_BLC_SELF, (srwm & 0x3f)); + } + + DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + I915_WRITE(FW_BLC, fwater_lo); + I915_WRITE(FW_BLC2, fwater_hi); +} + +static void i830_update_wm(struct drm_device *dev, int planea_clock, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; + int planea_wm; + + i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); + + planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, + pixel_size, latency_ns); + fwater_lo |= (3<<8) | planea_wm; + + DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); + + I915_WRITE(FW_BLC, fwater_lo); +} + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
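+ *
+ * As a rough worked example (hypothetical numbers): with a ~20 us line
+ * time and the ~6 us self-refresh latency assumed in i9xx_update_wm(),
+ * trunc(6/20) + 1 = 1, so the SR entries come to one line's worth of
+ * surface width * bytes per pixel, which is then divided into FIFO
+ * cachelines and subtracted from the total FIFO size.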
+ */ +static void intel_update_watermarks(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + int sr_hdisplay = 0; + unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; + int enabled = 0, pixel_size = 0; + + if (DSPARB_HWCONTROL(dev)) + return; + + /* Get the clock config from both planes */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + if (crtc->enabled) { + enabled++; + if (intel_crtc->plane == 0) { + DRM_DEBUG("plane A (pipe %d) clock: %d\n", + intel_crtc->pipe, crtc->mode.clock); + planea_clock = crtc->mode.clock; + } else { + DRM_DEBUG("plane B (pipe %d) clock: %d\n", + intel_crtc->pipe, crtc->mode.clock); + planeb_clock = crtc->mode.clock; + } + sr_hdisplay = crtc->mode.hdisplay; + sr_clock = crtc->mode.clock; + if (crtc->fb) + pixel_size = crtc->fb->bits_per_pixel / 8; + else + pixel_size = 4; /* by default */ + } + } + + if (enabled <= 0) + return; + + /* Single plane configs can enable self refresh */ + if (enabled == 1 && IS_IGD(dev)) + igd_enable_cxsr(dev, sr_clock, pixel_size); + else if (IS_IGD(dev)) + igd_disable_cxsr(dev); + + if (IS_I965G(dev)) + i965_update_wm(dev); + else if (IS_I9XX(dev) || IS_MOBILE(dev)) + i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, + pixel_size); + else + i830_update_wm(dev, planea_clock, pixel_size); +} + static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -1541,7 +2267,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false, is_dvo = false; - bool is_crt = false, is_lvds = false, is_tv = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + bool is_edp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; const intel_limit_t *limit; @@ -1557,6 +2284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int lvds_reg = LVDS; u32 temp; int sdvo_pixel_multiply; + int target_clock; drm_vblank_pre_modeset(dev, pipe); @@ -1585,6 +2313,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, case INTEL_OUTPUT_ANALOG: is_crt = true; break; + case INTEL_OUTPUT_DISPLAYPORT: + is_dp = true; + break; + case INTEL_OUTPUT_EDP: + is_edp = true; + break; } num_outputs++; @@ -1600,6 +2334,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } else { refclk = 48000; } + /* * Returns a set of divisors for the desired target clock with the given @@ -1635,11 +2370,29 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* FDI link */ - if (IS_IGDNG(dev)) - igdng_compute_m_n(3, 4, /* lane num 4 */ - adjusted_mode->clock, - 270000, /* lane clock */ - &m_n); + if (IS_IGDNG(dev)) { + int lane, link_bw; + /* eDP doesn't require FDI link, so just set DP M/N + according to current link config */ + if (is_edp) { + struct drm_connector *edp; + target_clock = mode->clock; + edp = intel_pipe_get_output(crtc); + intel_edp_link_config(to_intel_output(edp), + &lane, &link_bw); + } else { + /* DP over FDI requires target mode clock + instead of link clock */ + if (is_dp) + target_clock = mode->clock; + else + target_clock = adjusted_mode->clock; + lane = 4; + link_bw = 270000; + } + igdng_compute_m_n(3, lane, target_clock, + link_bw, &m_n); + } if (IS_IGD(dev)) fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; @@ -1657,11 +2410,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_sdvo) { dpll |= 
DPLL_DVO_HIGH_SPEED; sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - if (IS_I945G(dev) || IS_I945GM(dev)) + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; else if (IS_IGDNG(dev)) dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } + if (is_dp) + dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ if (IS_IGD(dev)) @@ -1758,29 +2513,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll_reg = pch_dpll_reg; } - if (dpll & DPLL_VCO_ENABLE) { + if (is_edp) { + igdng_disable_pll_edp(crtc); + } else if ((dpll & DPLL_VCO_ENABLE)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); I915_READ(dpll_reg); udelay(150); } - if (IS_IGDNG(dev)) { - /* enable PCH clock reference source */ - /* XXX need to change the setting for other outputs */ - u32 temp; - temp = I915_READ(PCH_DREF_CONTROL); - temp &= ~DREF_NONSPREAD_SOURCE_MASK; - temp |= DREF_NONSPREAD_CK505_ENABLE; - temp &= ~DREF_SSC_SOURCE_MASK; - temp |= DREF_SSC_SOURCE_ENABLE; - temp &= ~DREF_SSC1_ENABLE; - /* if no eDP, disable source output to CPU */ - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - } - /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -1809,24 +2550,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(lvds_reg, lvds); I915_READ(lvds_reg); } + if (is_dp) + intel_dp_set_m_n(crtc, mode, adjusted_mode); - I915_WRITE(fp_reg, fp); - I915_WRITE(dpll_reg, dpll); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - - if (IS_I965G(dev) && !IS_IGDNG(dev)) { - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | - ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); - } else { - /* write it again -- the BIOS does, after all */ + if (!is_edp) { + I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + + if (IS_I965G(dev) && !IS_IGDNG(dev)) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | + ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); + } else { + /* write it again -- the BIOS does, after all */ + I915_WRITE(dpll_reg, dpll); + } + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); } - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. 
*/ - udelay(150); I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); @@ -1856,10 +2601,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(link_m1_reg, m_n.link_m); I915_WRITE(link_n1_reg, m_n.link_n); - /* enable FDI RX PLL too */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - udelay(200); + if (is_edp) { + igdng_set_pll_edp(crtc, adjusted_mode->clock); + } else { + /* enable FDI RX PLL too */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + udelay(200); + } } I915_WRITE(pipeconf_reg, pipeconf); @@ -1871,6 +2620,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); + + intel_update_watermarks(dev); + drm_vblank_post_modeset(dev, pipe); return ret; @@ -2359,6 +3111,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); intel_crtc->pipe = pipe; + intel_crtc->plane = pipe; for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; intel_crtc->lut_g[i] = i; @@ -2431,7 +3184,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct intel_output *intel_output = to_intel_output(connector); - if (type_mask & (1 << intel_output->type)) + if (type_mask & intel_output->clone_mask) index_mask |= (1 << entry); entry++; } @@ -2453,12 +3206,17 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_IGDNG(dev)) { int found; + if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) + intel_dp_init(dev, DP_A); + if (I915_READ(HDMIB) & PORT_DETECTED) { /* check SDVOB */ /* found = intel_sdvo_init(dev, HDMIB); */ found = 0; if (!found) intel_hdmi_init(dev, HDMIB); + if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_B); } if (I915_READ(HDMIC) & PORT_DETECTED) @@ -2467,27 +3225,39 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(HDMID) & PORT_DETECTED) intel_hdmi_init(dev, HDMID); + if (I915_READ(PCH_DP_C) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_C); + + if (I915_READ(PCH_DP_D) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_D); + } else if (IS_I9XX(dev)) { - int found; - u32 reg; + bool found = false; if (I915_READ(SDVOB) & SDVO_DETECTED) { found = intel_sdvo_init(dev, SDVOB); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOB); + + if (!found && SUPPORTS_INTEGRATED_DP(dev)) + intel_dp_init(dev, DP_B); } /* Before G4X SDVOC doesn't have its own detect register */ - if (IS_G4X(dev)) - reg = SDVOC; - else - reg = SDVOB; - if (I915_READ(reg) & SDVO_DETECTED) { + if (I915_READ(SDVOB) & SDVO_DETECTED) found = intel_sdvo_init(dev, SDVOC); - if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) + + if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { + + if (SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOC); + if (SUPPORTS_INTEGRATED_DP(dev)) + intel_dp_init(dev, DP_C); } + + if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) + intel_dp_init(dev, DP_D); } else intel_dvo_init(dev); @@ -2497,42 +3267,10 @@ static void intel_setup_outputs(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct intel_output *intel_output = to_intel_output(connector); struct drm_encoder *encoder = &intel_output->enc; - int crtc_mask = 0, clone_mask = 0; - /* valid crtcs */ - switch(intel_output->type) { - case 
INTEL_OUTPUT_HDMI: - crtc_mask = ((1 << 0)| - (1 << 1)); - clone_mask = ((1 << INTEL_OUTPUT_HDMI)); - break; - case INTEL_OUTPUT_DVO: - case INTEL_OUTPUT_SDVO: - crtc_mask = ((1 << 0)| - (1 << 1)); - clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | - (1 << INTEL_OUTPUT_DVO) | - (1 << INTEL_OUTPUT_SDVO)); - break; - case INTEL_OUTPUT_ANALOG: - crtc_mask = ((1 << 0)| - (1 << 1)); - clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | - (1 << INTEL_OUTPUT_DVO) | - (1 << INTEL_OUTPUT_SDVO)); - break; - case INTEL_OUTPUT_LVDS: - crtc_mask = (1 << 1); - clone_mask = (1 << INTEL_OUTPUT_LVDS); - break; - case INTEL_OUTPUT_TVOUT: - crtc_mask = ((1 << 0) | - (1 << 1)); - clone_mask = (1 << INTEL_OUTPUT_TVOUT); - break; - } - encoder->possible_crtcs = crtc_mask; - encoder->possible_clones = intel_connector_clones(dev, clone_mask); + encoder->possible_crtcs = intel_output->crtc_mask; + encoder->possible_clones = intel_connector_clones(dev, + intel_output->clone_mask); } } @@ -2639,6 +3377,9 @@ void intel_modeset_init(struct drm_device *dev) if (IS_I965G(dev)) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else if (IS_I9XX(dev)) { + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; } else { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c new file mode 100644 index 00000000000..2b914d73207 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -0,0 +1,1330 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Keith Packard <keithp@keithp.com> + * + */ + +#include <linux/i2c.h> +#include "drmP.h" +#include "drm.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "intel_drv.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "intel_dp.h" + +#define DP_LINK_STATUS_SIZE 6 +#define DP_LINK_CHECK_TIMEOUT (10 * 1000) + +#define DP_LINK_CONFIGURATION_SIZE 9 + +#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) + +struct intel_dp_priv { + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; + uint32_t save_DP; + uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + int dpms_mode; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[4]; + struct intel_output *intel_output; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; +}; + +static void +intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); + +static void +intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); + +void +intel_edp_link_config (struct intel_output *intel_output, + int *lane_num, int *link_bw) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + *lane_num = dp_priv->lane_count; + if (dp_priv->link_bw == DP_LINK_BW_1_62) + *link_bw = 162000; + else if (dp_priv->link_bw == DP_LINK_BW_2_7) + *link_bw = 270000; +} + +static int +intel_dp_max_lane_count(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int max_lane_count = 4; + + if (dp_priv->dpcd[0] >= 0x11) { + max_lane_count = dp_priv->dpcd[2] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; + } + } + return max_lane_count; +} + +static int +intel_dp_max_link_bw(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int max_link_bw = dp_priv->dpcd[1]; + + switch (max_link_bw) { + case DP_LINK_BW_1_62: + case DP_LINK_BW_2_7: + break; + default: + max_link_bw = DP_LINK_BW_1_62; + break; + } + return max_link_bw; +} + +static int +intel_dp_link_clock(uint8_t link_bw) +{ + if (link_bw == DP_LINK_BW_2_7) + return 270000; + else + return 162000; +} + +/* I think this is a fiction */ +static int +intel_dp_link_required(int pixel_clock) +{ + return pixel_clock * 3; +} + +static int +intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_output *intel_output = to_intel_output(connector); + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); + int max_lanes = intel_dp_max_lane_count(intel_output); + + if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) + return MODE_CLOCK_HIGH; + + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + return MODE_OK; +} + +static uint32_t +pack_aux(uint8_t *src, int src_bytes) +{ + int i; + uint32_t v = 0; + + if (src_bytes > 4) + src_bytes = 4; + for (i = 0; i < src_bytes; i++) + v |= ((uint32_t) src[i]) << ((3-i) * 8); + return v; +} + +static void +unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +{ + int i; + if (dst_bytes > 4) + dst_bytes = 4; + for (i = 0; i < dst_bytes; i++) + dst[i] = src >> ((3-i) * 8); +} + +/* hrawclock is 1/4 the FSB frequency */ +static int +intel_hrawclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t clkcfg; + + clkcfg = I915_READ(CLKCFG); + switch (clkcfg & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_400: + return 100; + case 
CLKCFG_FSB_533: + return 133; + case CLKCFG_FSB_667: + return 166; + case CLKCFG_FSB_800: + return 200; + case CLKCFG_FSB_1067: + return 266; + case CLKCFG_FSB_1333: + return 333; + /* these two are just a guess; one of them might be right */ + case CLKCFG_FSB_1600: + case CLKCFG_FSB_1600_ALT: + return 400; + default: + return 133; + } +} + +static int +intel_dp_aux_ch(struct intel_output *intel_output, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint32_t output_reg = dp_priv->output_reg; + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = output_reg + 0x10; + uint32_t ch_data = ch_ctl + 4; + int i; + int recv_bytes; + uint32_t ctl; + uint32_t status; + uint32_t aux_clock_divider; + int try; + + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. So, take the + * hrawclk value and divide by 2 and use that + */ + if (IS_eDP(intel_output)) + aux_clock_divider = 225; /* eDP input clock at 450Mhz */ + else if (IS_IGDNG(dev)) + aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ + else + aux_clock_divider = intel_hrawclk(dev) / 2; + + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) { + uint32_t d = pack_aux(send + i, send_bytes - i);; + + I915_WRITE(ch_data + i, d); + } + + ctl = (DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + /* Send the command and wait for it to complete */ + I915_WRITE(ch_ctl, ctl); + (void) I915_READ(ch_ctl); + for (;;) { + udelay(100); + status = I915_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + } + + /* Clear done status and any errors */ + I915_WRITE(ch_ctl, (status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR)); + (void) I915_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) + break; + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { + DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + return -EBUSY; + } + + /* Check for timeout or receive error. 
+ * Timeouts occur when the sink is not connected + */ + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + return -EIO; + } + + /* Timeouts occur when the device isn't connected, so they're + * "normal" -- don't fill the kernel log with these */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { + DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); + return -ETIMEDOUT; + } + + /* Unload any bytes sent back from the other side */ + recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> + DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + + if (recv_bytes > recv_size) + recv_bytes = recv_size; + + for (i = 0; i < recv_bytes; i += 4) { + uint32_t d = I915_READ(ch_data + i); + + unpack_aux(d, recv + i, recv_bytes - i); + } + + return recv_bytes; +} + +/* Write data to the aux channel in native mode */ +static int +intel_dp_aux_native_write(struct intel_output *intel_output, + uint16_t address, uint8_t *send, int send_bytes) +{ + int ret; + uint8_t msg[20]; + int msg_bytes; + uint8_t ack; + + if (send_bytes > 16) + return -1; + msg[0] = AUX_NATIVE_WRITE << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = send_bytes - 1; + memcpy(&msg[4], send, send_bytes); + msg_bytes = send_bytes + 4; + for (;;) { + ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); + if (ret < 0) + return ret; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) + break; + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } + return send_bytes; +} + +/* Write a single byte to the aux channel in native mode */ +static int +intel_dp_aux_native_write_1(struct intel_output *intel_output, + uint16_t address, uint8_t byte) +{ + return intel_dp_aux_native_write(intel_output, address, &byte, 1); +} + +/* read bytes from a native aux channel */ +static int +intel_dp_aux_native_read(struct intel_output *intel_output, + uint16_t address, uint8_t *recv, int recv_bytes) +{ + uint8_t msg[4]; + int msg_bytes; + uint8_t reply[20]; + int reply_bytes; + uint8_t ack; + int ret; + + msg[0] = AUX_NATIVE_READ << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = recv_bytes - 1; + + msg_bytes = 4; + reply_bytes = recv_bytes + 1; + + for (;;) { + ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, + reply, reply_bytes); + if (ret == 0) + return -EPROTO; + if (ret < 0) + return ret; + ack = reply[0]; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { + memcpy(recv, reply + 1, ret - 1); + return ret - 1; + } + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } +} + +static int +intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_bytes) +{ + struct intel_dp_priv *dp_priv = container_of(adapter, + struct intel_dp_priv, + adapter); + struct intel_output *intel_output = dp_priv->intel_output; + + return intel_dp_aux_ch(intel_output, + send, send_bytes, recv, recv_bytes); +} + +static int +intel_dp_i2c_init(struct intel_output *intel_output, const char *name) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + DRM_ERROR("i2c_init %s\n", name); + dp_priv->algo.running = false; + dp_priv->algo.address = 0; + dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; + + memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); + dp_priv->adapter.owner = THIS_MODULE; + dp_priv->adapter.class = I2C_CLASS_DDC; + strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 
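+ /* strncpy() does not NUL-terminate when the source fills the
+ * destination, so terminate the adapter name explicitly. */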
+ dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; + dp_priv->adapter.algo_data = &dp_priv->algo; + dp_priv->adapter.dev.parent = &intel_output->base.kdev; + + return i2c_dp_aux_add_bus(&dp_priv->adapter); +} + +static bool +intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int lane_count, clock; + int max_lane_count = intel_dp_max_lane_count(intel_output); + int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; + static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = 0; clock <= max_clock; clock++) { + int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; + + if (intel_dp_link_required(mode->clock) <= link_avail) { + dp_priv->link_bw = bws[clock]; + dp_priv->lane_count = lane_count; + adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); + DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", + dp_priv->link_bw, dp_priv->lane_count, + adjusted_mode->clock); + return true; + } + } + } + return false; +} + +struct intel_dp_m_n { + uint32_t tu; + uint32_t gmch_m; + uint32_t gmch_n; + uint32_t link_m; + uint32_t link_n; +}; + +static void +intel_reduce_ratio(uint32_t *num, uint32_t *den) +{ + while (*num > 0xffffff || *den > 0xffffff) { + *num >>= 1; + *den >>= 1; + } +} + +static void +intel_dp_compute_m_n(int bytes_per_pixel, + int nlanes, + int pixel_clock, + int link_clock, + struct intel_dp_m_n *m_n) +{ + m_n->tu = 64; + m_n->gmch_m = pixel_clock * bytes_per_pixel; + m_n->gmch_n = link_clock * nlanes; + intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); + m_n->link_m = pixel_clock; + m_n->link_n = link_clock; + intel_reduce_ratio(&m_n->link_m, &m_n->link_n); +} + +void +intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int lane_count = 4; + struct intel_dp_m_n m_n; + + /* + * Find the lane count in the intel_output private + */ + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (!connector->encoder || connector->encoder->crtc != crtc) + continue; + + if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = dp_priv->lane_count; + break; + } + } + + /* + * Compute the GMCH and Link ratios. The '3' here is + * the number of bytes_per_pixel post-LUT, which we always + * set up for 8-bits of R/G/B, or 3 bytes total. 
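+ *
+ * As an illustrative (hypothetical) example: a 108000 kHz mode on a
+ * 2-lane 2.7 GHz link gives gmch M/N = 324000/540000 and link
+ * M/N = 108000/270000; intel_reduce_ratio() only shifts the values
+ * down if either one exceeds 24 bits.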
+ */ + intel_dp_compute_m_n(3, lane_count, + mode->clock, adjusted_mode->clock, &m_n); + + if (IS_IGDNG(dev)) { + if (intel_crtc->pipe == 0) { + I915_WRITE(TRANSA_DATA_M1, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); + I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); + I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); + } else { + I915_WRITE(TRANSB_DATA_M1, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); + I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); + I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); + } + } else { + if (intel_crtc->pipe == 0) { + I915_WRITE(PIPEA_GMCH_DATA_M, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(PIPEA_GMCH_DATA_N, + m_n.gmch_n); + I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); + I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); + } else { + I915_WRITE(PIPEB_GMCH_DATA_M, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(PIPEB_GMCH_DATA_N, + m_n.gmch_n); + I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); + I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); + } + } +} + +static void +intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct drm_crtc *crtc = intel_output->enc.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + dp_priv->DP = (DP_LINK_TRAIN_OFF | + DP_VOLTAGE_0_4 | + DP_PRE_EMPHASIS_0 | + DP_SYNC_VS_HIGH | + DP_SYNC_HS_HIGH); + + switch (dp_priv->lane_count) { + case 1: + dp_priv->DP |= DP_PORT_WIDTH_1; + break; + case 2: + dp_priv->DP |= DP_PORT_WIDTH_2; + break; + case 4: + dp_priv->DP |= DP_PORT_WIDTH_4; + break; + } + if (dp_priv->has_audio) + dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; + + memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + dp_priv->link_configuration[0] = dp_priv->link_bw; + dp_priv->link_configuration[1] = dp_priv->lane_count; + + /* + * Check for DPCD version > 1.1, + * enable enahanced frame stuff in that case + */ + if (dp_priv->dpcd[0] >= 0x11) { + dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + dp_priv->DP |= DP_ENHANCED_FRAMING; + } + + if (intel_crtc->pipe == 1) + dp_priv->DP |= DP_PIPEB_SELECT; + + if (IS_eDP(intel_output)) { + /* don't miss out required setting for eDP */ + dp_priv->DP |= DP_PLL_ENABLE; + if (adjusted_mode->clock < 200000) + dp_priv->DP |= DP_PLL_FREQ_160MHZ; + else + dp_priv->DP |= DP_PLL_FREQ_270MHZ; + } +} + +static void igdng_edp_backlight_on (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + DRM_DEBUG("\n"); + pp = I915_READ(PCH_PP_CONTROL); + pp |= EDP_BLC_ENABLE; + I915_WRITE(PCH_PP_CONTROL, pp); +} + +static void igdng_edp_backlight_off (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + DRM_DEBUG("\n"); + pp = I915_READ(PCH_PP_CONTROL); + pp &= ~EDP_BLC_ENABLE; + I915_WRITE(PCH_PP_CONTROL, pp); +} + +static void +intel_dp_dpms(struct drm_encoder *encoder, int mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(dp_priv->output_reg); + + if (mode != DRM_MODE_DPMS_ON) { + if (dp_reg & DP_PORT_EN) { + 
intel_dp_link_down(intel_output, dp_priv->DP); + if (IS_eDP(intel_output)) + igdng_edp_backlight_off(dev); + } + } else { + if (!(dp_reg & DP_PORT_EN)) { + intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); + if (IS_eDP(intel_output)) + igdng_edp_backlight_on(dev); + } + } + dp_priv->dpms_mode = mode; +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +static bool +intel_dp_get_link_status(struct intel_output *intel_output, + uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + int ret; + + ret = intel_dp_aux_native_read(intel_output, + DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret != DP_LINK_STATUS_SIZE) + return false; + return true; +} + +static uint8_t +intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static void +intel_dp_save(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + dp_priv->save_DP = I915_READ(dp_priv->output_reg); + intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, + dp_priv->save_link_configuration, + sizeof (dp_priv->save_link_configuration)); +} + +static uint8_t +intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + uint8_t l = intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + uint8_t l = intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + +#if 0 +static char *voltage_names[] = { + "0.4V", "0.6V", "0.8V", "1.2V" +}; +static char *pre_emph_names[] = { + "0dB", "3.5dB", "6dB", "9.5dB" +}; +static char *link_train_names[] = { + "pattern 1", "pattern 2", "idle", "off" +}; +#endif + +/* + * These are source-specific values; current Intel hardware supports + * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB + */ +#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 + +static uint8_t +intel_dp_pre_emphasis_max(uint8_t voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } +} + +static void +intel_get_adjust_train(struct intel_output *intel_output, + uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane_count, + uint8_t train_set[4]) +{ + uint8_t v = 0; + uint8_t p = 0; + int lane; + + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + if (v >= I830_DP_VOLTAGE_MAX) + v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + + if (p >= intel_dp_pre_emphasis_max(v)) + p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + train_set[lane] = v | p; +} + +static uint32_t +intel_dp_signal_levels(uint8_t train_set, int lane_count) +{ + uint32_t signal_levels = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + default: + signal_levels |= DP_VOLTAGE_0_4; + break; + case DP_TRAIN_VOLTAGE_SWING_600: + signal_levels |= DP_VOLTAGE_0_6; + break; + case DP_TRAIN_VOLTAGE_SWING_800: + signal_levels |= DP_VOLTAGE_0_8; + break; + case DP_TRAIN_VOLTAGE_SWING_1200: + signal_levels |= DP_VOLTAGE_1_2; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPHASIS_0: + default: + signal_levels |= DP_PRE_EMPHASIS_0; + break; + case DP_TRAIN_PRE_EMPHASIS_3_5: + signal_levels |= DP_PRE_EMPHASIS_3_5; + break; + case DP_TRAIN_PRE_EMPHASIS_6: + signal_levels |= DP_PRE_EMPHASIS_6; + break; + case DP_TRAIN_PRE_EMPHASIS_9_5: + signal_levels |= DP_PRE_EMPHASIS_9_5; + break; + } + return signal_levels; +} + +static uint8_t +intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + uint8_t l = intel_dp_link_status(link_status, i); + + return (l >> s) & 0xf; +} + +/* Check for clock recovery is done on all channels */ +static bool +intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + int lane; + uint8_t lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ + DP_LANE_CHANNEL_EQ_DONE|\ + DP_LANE_SYMBOL_LOCKED) +static bool +intel_channel_eq_ok(uint8_t 
link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + uint8_t lane_align; + uint8_t lane_status; + int lane; + + lane_align = intel_dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static bool +intel_dp_set_link_train(struct intel_output *intel_output, + uint32_t dp_reg_value, + uint8_t dp_train_pat, + uint8_t train_set[4], + bool first) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int ret; + + I915_WRITE(dp_priv->output_reg, dp_reg_value); + POSTING_READ(dp_priv->output_reg); + if (first) + intel_wait_for_vblank(dev); + + intel_dp_aux_native_write_1(intel_output, + DP_TRAINING_PATTERN_SET, + dp_train_pat); + + ret = intel_dp_aux_native_write(intel_output, + DP_TRAINING_LANE0_SET, train_set, 4); + if (ret != 4) + return false; + + return true; +} + +static void +intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint8_t train_set[4]; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + int i; + uint8_t voltage; + bool clock_recovery = false; + bool channel_eq = false; + bool first = true; + int tries; + + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_output, 0x100, + link_configuration, DP_LINK_CONFIGURATION_SIZE); + + DP |= DP_PORT_EN; + DP &= ~DP_LINK_TRAIN_MASK; + memset(train_set, 0, 4); + voltage = 0xff; + tries = 0; + clock_recovery = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ + uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + + if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, + DP_TRAINING_PATTERN_1, train_set, first)) + break; + first = false; + /* Set training pattern 1 */ + + udelay(100); + if (!intel_dp_get_link_status(intel_output, link_status)) + break; + + if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < dp_priv->lane_count; i++) + if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == dp_priv->lane_count) + break; + + /* Check to see if we've tried the same voltage 5 times */ + if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new train_set as requested by target */ + intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + } + + /* channel equalization */ + tries = 0; + channel_eq = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ + uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + + /* channel eq pattern */ + if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, + 
DP_TRAINING_PATTERN_2, train_set, + false)) + break; + + udelay(400); + if (!intel_dp_get_link_status(intel_output, link_status)) + break; + + if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) + break; + + /* Compute new train_set as requested by target */ + intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + ++tries; + } + + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); + POSTING_READ(dp_priv->output_reg); + intel_dp_aux_native_write_1(intel_output, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); +} + +static void +intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + DRM_DEBUG("\n"); + + if (IS_eDP(intel_output)) { + DP &= ~DP_PLL_ENABLE; + I915_WRITE(dp_priv->output_reg, DP); + POSTING_READ(dp_priv->output_reg); + udelay(100); + } + + DP &= ~DP_LINK_TRAIN_MASK; + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + POSTING_READ(dp_priv->output_reg); + + udelay(17000); + + if (IS_eDP(intel_output)) + DP |= DP_LINK_TRAIN_OFF; + I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(dp_priv->output_reg); +} + +static void +intel_dp_restore(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (dp_priv->save_DP & DP_PORT_EN) + intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); + else + intel_dp_link_down(intel_output, dp_priv->save_DP); +} + +/* + * According to DP spec + * 5.1.2: + * 1. Read DPCD + * 2. Configure link according to Receiver Capabilities + * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 + * 4. Check link status on receipt of hot-plug interrupt + */ + +static void +intel_dp_check_link_status(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + if (!intel_output->enc.crtc) + return; + + if (!intel_dp_get_link_status(intel_output, link_status)) { + intel_dp_link_down(intel_output, dp_priv->DP); + return; + } + + if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) + intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); +} + +static enum drm_connector_status +igdng_dp_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + enum drm_connector_status status; + + status = connector_status_disconnected; + if (intel_dp_aux_native_read(intel_output, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { + if (dp_priv->dpcd[0] != 0) + status = connector_status_connected; + } + return status; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected. 
+ */ +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint32_t temp, bit; + enum drm_connector_status status; + + dp_priv->has_audio = false; + + if (IS_IGDNG(dev)) + return igdng_dp_detect(connector); + + temp = I915_READ(PORT_HOTPLUG_EN); + + I915_WRITE(PORT_HOTPLUG_EN, + temp | + DPB_HOTPLUG_INT_EN | + DPC_HOTPLUG_INT_EN | + DPD_HOTPLUG_INT_EN); + + POSTING_READ(PORT_HOTPLUG_EN); + + switch (dp_priv->output_reg) { + case DP_B: + bit = DPB_HOTPLUG_INT_STATUS; + break; + case DP_C: + bit = DPC_HOTPLUG_INT_STATUS; + break; + case DP_D: + bit = DPD_HOTPLUG_INT_STATUS; + break; + default: + return connector_status_unknown; + } + + temp = I915_READ(PORT_HOTPLUG_STAT); + + if ((temp & bit) == 0) + return connector_status_disconnected; + + status = connector_status_disconnected; + if (intel_dp_aux_native_read(intel_output, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { + if (dp_priv->dpcd[0] != 0) + status = connector_status_connected; + } + return status; +} + +static int intel_dp_get_modes(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* We should parse the EDID data and find out if it has an audio sink + */ + + ret = intel_ddc_get_modes(intel_output); + if (ret) + return ret; + + /* if eDP has no EDID, try to use fixed panel mode from VBT */ + if (IS_eDP(intel_output)) { + if (dev_priv->panel_fixed_mode != NULL) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); + drm_mode_probed_add(connector, mode); + return 1; + } + } + return 0; +} + +static void +intel_dp_destroy (struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + + if (intel_output->i2c_bus) + intel_i2c_destroy(intel_output->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(intel_output); +} + +static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + .dpms = intel_dp_dpms, + .mode_fixup = intel_dp_mode_fixup, + .prepare = intel_encoder_prepare, + .mode_set = intel_dp_mode_set, + .commit = intel_encoder_commit, +}; + +static const struct drm_connector_funcs intel_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = intel_dp_save, + .restore = intel_dp_restore, + .detect = intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_dp_destroy, +}; + +static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static void intel_dp_enc_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .destroy = intel_dp_enc_destroy, +}; + +void +intel_dp_hot_plug(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) + intel_dp_check_link_status(intel_output); +} + +void +intel_dp_init(struct drm_device *dev, int output_reg) +{ + struct drm_i915_private 
*dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_output *intel_output; + struct intel_dp_priv *dp_priv; + const char *name = NULL; + + intel_output = kcalloc(sizeof(struct intel_output) + + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); + if (!intel_output) + return; + + dp_priv = (struct intel_dp_priv *)(intel_output + 1); + + connector = &intel_output->base; + drm_connector_init(dev, connector, &intel_dp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + + if (output_reg == DP_A) + intel_output->type = INTEL_OUTPUT_EDP; + else + intel_output->type = INTEL_OUTPUT_DISPLAYPORT; + + if (output_reg == DP_B) + intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); + else if (output_reg == DP_C) + intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); + else if (output_reg == DP_D) + intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); + + if (IS_eDP(intel_output)) { + intel_output->crtc_mask = (1 << 1); + intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); + } else + intel_output->crtc_mask = (1 << 0) | (1 << 1); + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + dp_priv->intel_output = intel_output; + dp_priv->output_reg = output_reg; + dp_priv->has_audio = false; + dp_priv->dpms_mode = DRM_MODE_DPMS_ON; + intel_output->dev_priv = dp_priv; + + drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); + + drm_mode_connector_attach_encoder(&intel_output->base, + &intel_output->enc); + drm_sysfs_connector_add(connector); + + /* Set up the DDC bus. */ + switch (output_reg) { + case DP_A: + name = "DPDDC-A"; + break; + case DP_B: + case PCH_DP_B: + name = "DPDDC-B"; + break; + case DP_C: + case PCH_DP_C: + name = "DPDDC-C"; + break; + case DP_D: + case PCH_DP_D: + name = "DPDDC-D"; + break; + } + + intel_dp_i2c_init(intel_output, name); + + intel_output->ddc_bus = &dp_priv->adapter; + intel_output->hot_plug = intel_dp_hot_plug; + + if (output_reg == DP_A) { + /* initialize panel mode from VBT if available for eDP */ + if (dev_priv->lfp_lvds_vbt_mode) { + dev_priv->panel_fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (dev_priv->panel_fixed_mode) { + dev_priv->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + } + } + } + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. + */ + if (IS_G4X(dev) && !IS_GM45(dev)) { + u32 temp = I915_READ(PEG_BAND_GAP_DATA); + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } +} diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h new file mode 100644 index 00000000000..2b38054d3b6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.h @@ -0,0 +1,144 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. 
The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _INTEL_DP_H_ +#define _INTEL_DP_H_ + +/* From the VESA DisplayPort spec */ + +#define AUX_NATIVE_WRITE 0x8 +#define AUX_NATIVE_READ 0x9 +#define AUX_I2C_WRITE 0x0 +#define AUX_I2C_READ 0x1 +#define AUX_I2C_STATUS 0x2 +#define AUX_I2C_MOT 0x4 + +#define AUX_NATIVE_REPLY_ACK (0x0 << 4) +#define AUX_NATIVE_REPLY_NACK (0x1 << 4) +#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) +#define AUX_NATIVE_REPLY_MASK (0x3 << 4) + +#define AUX_I2C_REPLY_ACK (0x0 << 6) +#define AUX_I2C_REPLY_NACK (0x1 << 6) +#define AUX_I2C_REPLY_DEFER (0x2 << 6) +#define AUX_I2C_REPLY_MASK (0x3 << 6) + +/* AUX CH addresses */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 + +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_MASK 0x3 + +# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) +# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) +# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) +# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 + +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 + 
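+/* Bits in the DP_SINK_STATUS (0x205) register */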
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 + +#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +struct i2c_algo_dp_aux_data { + bool running; + u16 address; + int (*aux_ch) (struct i2c_adapter *adapter, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_bytes); +}; + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter); + +#endif /* _INTEL_DP_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c new file mode 100644 index 00000000000..a63b6f57d2d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_i2c.c @@ -0,0 +1,273 @@ +/* + * Copyright © 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
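The i2c_algo_dp_aux_data structure and i2c_dp_aux_add_bus() declared at the end of the header above are the public surface of the AUX-over-I2C helper. A rough standalone sketch of how a caller might wire them up (the adapter setup and the my_aux_ch callback are hypothetical, not taken from this patch) could look like:

    #include <linux/module.h>
    #include <linux/string.h>
    #include <linux/i2c.h>
    #include "intel_dp.h"

    /* Hypothetical low-level callback that performs one AUX channel transfer
     * (definition omitted in this sketch). */
    static int my_aux_ch(struct i2c_adapter *adapter,
                         uint8_t *send, int send_bytes,
                         uint8_t *recv, int recv_bytes);

    static struct i2c_algo_dp_aux_data my_aux_data = {
            .running = false,
            .address = 0,
            .aux_ch  = my_aux_ch,
    };

    /* Register an i2c adapter whose transfers are tunnelled over the AUX CH. */
    static int register_dp_aux(struct i2c_adapter *adapter, struct device *parent)
    {
            adapter->owner      = THIS_MODULE;
            adapter->class      = I2C_CLASS_DDC;
            adapter->algo_data  = &my_aux_data;   /* consumed by the AUX algo's xfer */
            adapter->dev.parent = parent;
            strlcpy(adapter->name, "DPDDC-sketch", sizeof(adapter->name));
            return i2c_dp_aux_add_bus(adapter);   /* installs the AUX algorithm */
    }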
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/i2c.h> +#include "intel_dp.h" +#include "drmP.h" + +/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +static int +i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + int msg_bytes; + int reply_bytes; + int ret; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[0] = AUX_I2C_READ << 4; + else + msg[0] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[0] |= AUX_I2C_MOT << 4; + + msg[1] = address >> 8; + msg[2] = address; + + switch (mode) { + case MODE_I2C_WRITE: + msg[3] = 0; + msg[4] = write_byte; + msg_bytes = 5; + reply_bytes = 1; + break; + case MODE_I2C_READ: + msg[3] = 0; + msg_bytes = 4; + reply_bytes = 2; + break; + default: + msg_bytes = 3; + reply_bytes = 1; + break; + } + + for (;;) { + ret = (*algo_data->aux_ch)(adapter, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { + DRM_DEBUG("aux_ch failed %d\n", ret); + return ret; + } + switch (reply[0] & AUX_I2C_REPLY_MASK) { + case AUX_I2C_REPLY_ACK: + if (mode == MODE_I2C_READ) { + *read_byte = reply[1]; + } + return reply_bytes - 1; + case AUX_I2C_REPLY_NACK: + DRM_DEBUG("aux_ch nack\n"); + return -EREMOTEIO; + case AUX_I2C_REPLY_DEFER: + DRM_DEBUG("aux_ch defer\n"); + udelay(100); + break; + default: + DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); + return -EREMOTEIO; + } + } +} + +/* + * I2C over AUX CH + */ + +/* + * Send the address. If the I2C link is running, this 'restarts' + * the connection with the new address, this is used for doing + * a write followed by a read (as needed for DDC) + */ +static int +i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_START; + int ret; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + algo_data->address = address; + algo_data->running = true; + ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + return ret; +} + +/* + * Stop the I2C transaction. 
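The command-byte packing in i2c_algo_dp_aux_transaction() above is compact enough to restate on its own. A minimal sketch of just the 3-byte request header (a hypothetical helper, reusing the AUX_I2C_* codes from intel_dp.h) would be:

    #include <stdint.h>
    #include "intel_dp.h"   /* AUX_I2C_READ, AUX_I2C_WRITE, AUX_I2C_MOT */

    /* Build the 3-byte header of an AUX CH I2C request, mirroring the code
     * above: 'mot' (middle-of-transaction) is set for every message except
     * the final MODE_I2C_STOP one, which keeps the I2C transfer open. */
    static void dp_aux_i2c_header(uint8_t msg[3], uint16_t address,
                                  int reading, int mot)
    {
            msg[0] = (reading ? AUX_I2C_READ : AUX_I2C_WRITE) << 4;
            if (mot)
                    msg[0] |= AUX_I2C_MOT << 4;
            msg[1] = address >> 8;  /* address bits 15:8 */
            msg[2] = address;       /* address bits 7:0 */
    }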
This closes out the link, sending + * a bare address packet with the MOT bit turned off + */ +static void +i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_STOP; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + if (algo_data->running) { + (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + algo_data->running = false; + } +} + +/* + * Write a single byte to the current I2C address, the + * the I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); + return ret; +} + +/* + * Read a single byte from the current I2C address, the + * I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); + return ret; +} + +static int +i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + int ret = 0; + bool reading = false; + int m; + int b; + + for (m = 0; m < num; m++) { + u16 len = msgs[m].len; + u8 *buf = msgs[m].buf; + reading = (msgs[m].flags & I2C_M_RD) != 0; + ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); + if (ret < 0) + break; + if (reading) { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); + if (ret < 0) + break; + } + } else { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); + if (ret < 0) + break; + } + } + if (ret < 0) + break; + } + if (ret >= 0) + ret = num; + i2c_algo_dp_aux_stop(adapter, reading); + DRM_DEBUG("dp_aux_xfer return %d\n", ret); + return ret; +} + +static u32 +i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm i2c_dp_aux_algo = { + .master_xfer = i2c_algo_dp_aux_xfer, + .functionality = i2c_algo_dp_aux_functionality, +}; + +static void +i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) +{ + (void) i2c_algo_dp_aux_address(adapter, 0, false); + (void) i2c_algo_dp_aux_stop(adapter, false); + +} + +static int +i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) +{ + adapter->algo = &i2c_dp_aux_algo; + adapter->retries = 3; + i2c_dp_aux_reset_bus(adapter); + return 0; +} + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter) +{ + int error; + + error = i2c_dp_aux_prepare_bus(adapter); + if (error) + return error; + error = i2c_add_adapter(adapter); + return error; +} +EXPORT_SYMBOL(i2c_dp_aux_add_bus); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd4b9c5f715..26a6227c15f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -54,6 +54,27 @@ #define INTEL_OUTPUT_LVDS 4 #define INTEL_OUTPUT_TVOUT 5 #define INTEL_OUTPUT_HDMI 6 +#define INTEL_OUTPUT_DISPLAYPORT 7 +#define INTEL_OUTPUT_EDP 8 + +/* Intel Pipe Clone Bit */ +#define INTEL_HDMIB_CLONE_BIT 1 +#define INTEL_HDMIC_CLONE_BIT 2 +#define INTEL_HDMID_CLONE_BIT 3 +#define 
INTEL_HDMIE_CLONE_BIT 4 +#define INTEL_HDMIF_CLONE_BIT 5 +#define INTEL_SDVO_NON_TV_CLONE_BIT 6 +#define INTEL_SDVO_TV_CLONE_BIT 7 +#define INTEL_SDVO_LVDS_CLONE_BIT 8 +#define INTEL_ANALOG_CLONE_BIT 9 +#define INTEL_TV_CLONE_BIT 10 +#define INTEL_DP_B_CLONE_BIT 11 +#define INTEL_DP_C_CLONE_BIT 12 +#define INTEL_DP_D_CLONE_BIT 13 +#define INTEL_LVDS_CLONE_BIT 14 +#define INTEL_DVO_TMDS_CLONE_BIT 15 +#define INTEL_DVO_LVDS_CLONE_BIT 16 +#define INTEL_EDP_CLONE_BIT 17 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -65,7 +86,6 @@ struct intel_i2c_chan { u32 reg; /* GPIO reg */ struct i2c_adapter adapter; struct i2c_algo_bit_data algo; - u8 slave_addr; }; struct intel_framebuffer { @@ -79,11 +99,14 @@ struct intel_output { struct drm_encoder enc; int type; - struct intel_i2c_chan *i2c_bus; /* for control functions */ - struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ + struct i2c_adapter *i2c_bus; + struct i2c_adapter *ddc_bus; bool load_detect_temp; bool needs_tv_clock; void *dev_priv; + void (*hot_plug)(struct intel_output *); + int crtc_mask; + int clone_mask; }; struct intel_crtc { @@ -104,9 +127,9 @@ struct intel_crtc { #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) -struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name); -void intel_i2c_destroy(struct intel_i2c_chan *chan); +struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name); +void intel_i2c_destroy(struct i2c_adapter *adapter); int intel_ddc_get_modes(struct intel_output *intel_output); extern bool intel_ddc_probe(struct intel_output *intel_output); void intel_i2c_quirk_set(struct drm_device *dev, bool enable); @@ -116,6 +139,12 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_lvds_init(struct drm_device *dev); +extern void intel_dp_init(struct drm_device *dev, int dp_reg); +void +intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +extern void intel_edp_link_config (struct intel_output *, int *, int *); + extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 1ee3007d6ec..a4d2606de77 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev) { struct intel_output *intel_output; struct intel_dvo_device *dvo; - struct intel_i2c_chan *i2cbus = NULL; + struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; - int gpio_inited = 0; int encoder_type = DRM_MODE_ENCODER_NONE; intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) @@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev) * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - if (gpio_inited != gpio) { - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); - if (!(i2cbus = intel_i2c_create(dev, gpio, - gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { - continue; - } - gpio_inited = gpio; + if (i2cbus != NULL) + intel_i2c_destroy(i2cbus); + if (!(i2cbus = intel_i2c_create(dev, gpio, + gpio == GPIOB ? 
"DVOI2C_B" : "DVOI2C_E"))) { + continue; } if (dvo->dev_ops!= NULL) @@ -439,14 +435,20 @@ void intel_dvo_init(struct drm_device *dev) continue; intel_output->type = INTEL_OUTPUT_DVO; + intel_output->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: + intel_output->clone_mask = + (1 << INTEL_DVO_TMDS_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: + intel_output->clone_mask = + (1 << INTEL_DVO_LVDS_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 1af7d68e380..1d30802e773 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -453,7 +453,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, size = ALIGN(size, PAGE_SIZE); fbo = drm_gem_object_alloc(dev, size); if (!fbo) { - printk(KERN_ERR "failed to allocate framebuffer\n"); + DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } @@ -610,8 +610,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, par->dev = dev; /* To allow resizeing without swapping buffers */ - printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, - intel_fb->base.height, obj_priv->gtt_offset, fbo); + DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, + intel_fb->base.height, obj_priv->gtt_offset, fbo); mutex_unlock(&dev->struct_mutex); return 0; @@ -698,13 +698,13 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc * } else intelfb_set_par(info); - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id); /* Switch back to kernel console on panic */ kernelfb_mode = *modeset; atomic_notifier_chain_register(&panic_notifier_list, &paniced); - printk(KERN_INFO "registered panic notifier\n"); + DRM_DEBUG("registered panic notifier\n"); return 0; } @@ -852,13 +852,13 @@ static int intelfb_single_fb_probe(struct drm_device *dev) } else intelfb_set_par(info); - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id); /* Switch back to kernel console on panic */ kernelfb_mode = *modeset; atomic_notifier_chain_register(&panic_notifier_list, &paniced); - printk(KERN_INFO "registered panic notifier\n"); + DRM_DEBUG("registered panic notifier\n"); return 0; } @@ -872,8 +872,8 @@ void intelfb_restore(void) { int ret; if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { - printk(KERN_ERR "Failed to restore crtc configuration: %d\n", - ret); + DRM_ERROR("Failed to restore crtc configuration: %d\n", + ret); } } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4ea2a651b92..fa304e13601 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -31,6 +31,7 @@ #include "drmP.h" #include "drm.h" #include "drm_crtc.h" +#include "drm_edid.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" @@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE | SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH | - SDVO_NULL_PACKETS_DURING_VSYNC; + SDVO_HSYNC_ACTIVE_HIGH; if (hdmi_priv->has_hdmi_sink) sdvox |= 
SDVO_AUDIO_ENABLE; @@ -129,83 +129,28 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, return true; } -static void -intel_hdmi_sink_detect(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - struct edid *edid = NULL; - - edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); - if (edid != NULL) { - hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); - kfree(edid); - intel_output->base.display_info.raw_edid = NULL; - } -} - -static enum drm_connector_status -igdng_hdmi_detect(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - - /* FIXME hotplug detect */ - - hdmi_priv->has_hdmi_sink = false; - intel_hdmi_sink_detect(connector); - if (hdmi_priv->has_hdmi_sink) - return connector_status_connected; - else - return connector_status_disconnected; -} - static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - u32 temp, bit; - - if (IS_IGDNG(dev)) - return igdng_hdmi_detect(connector); - - temp = I915_READ(PORT_HOTPLUG_EN); - - switch (hdmi_priv->sdvox_reg) { - case SDVOB: - temp |= HDMIB_HOTPLUG_INT_EN; - break; - case SDVOC: - temp |= HDMIC_HOTPLUG_INT_EN; - break; - default: - return connector_status_unknown; - } - - I915_WRITE(PORT_HOTPLUG_EN, temp); + struct edid *edid = NULL; + enum drm_connector_status status = connector_status_disconnected; - POSTING_READ(PORT_HOTPLUG_EN); + hdmi_priv->has_hdmi_sink = false; + edid = drm_get_edid(&intel_output->base, + intel_output->ddc_bus); - switch (hdmi_priv->sdvox_reg) { - case SDVOB: - bit = HDMIB_HOTPLUG_INT_STATUS; - break; - case SDVOC: - bit = HDMIC_HOTPLUG_INT_STATUS; - break; - default: - return connector_status_unknown; + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + } + intel_output->base.display_info.raw_edid = NULL; + kfree(edid); } - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { - intel_hdmi_sink_detect(connector); - return connector_status_connected; - } else - return connector_status_disconnected; + return status; } static int intel_hdmi_get_modes(struct drm_connector *connector) @@ -285,22 +230,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; + intel_output->crtc_mask = (1 << 0) | (1 << 1); /* Set up the DDC bus. 
*/ - if (sdvox_reg == SDVOB) + if (sdvox_reg == SDVOB) { + intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); - else if (sdvox_reg == SDVOC) + } else if (sdvox_reg == SDVOC) { + intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); - else if (sdvox_reg == HDMIB) + } else if (sdvox_reg == HDMIB) { + intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, "HDMIB"); - else if (sdvox_reg == HDMIC) + } else if (sdvox_reg == HDMIC) { + intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, "HDMIC"); - else if (sdvox_reg == HDMID) + } else if (sdvox_reg == HDMID) { + intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, "HDMID"); - + } if (!intel_output->ddc_bus) goto err_connector; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f7061f68d05..62b8bead765 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -124,6 +124,7 @@ static void set_data(void *data, int state_high) * @output: driver specific output device * @reg: GPIO reg to use * @name: name for this bus + * @slave_addr: slave address (if fixed) * * Creates and registers a new i2c bus with the Linux i2c layer, for use * in output probing and control (e.g. DDC or SDVO control functions). @@ -139,8 +140,8 @@ static void set_data(void *data, int state_high) * %GPIOH * see PRM for details on how these different busses are used. */ -struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name) +struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name) { struct intel_i2c_chan *chan; @@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, intel_i2c_quirk_set(dev, false); udelay(20); - return chan; + return &chan->adapter; out_free: kfree(chan); @@ -187,11 +188,16 @@ out_free: * * Unregister the adapter from the i2c layer, then free the structure. */ -void intel_i2c_destroy(struct intel_i2c_chan *chan) +void intel_i2c_destroy(struct i2c_adapter *adapter) { - if (!chan) + struct intel_i2c_chan *chan; + + if (!adapter) return; + chan = container_of(adapter, + struct intel_i2c_chan, + adapter); i2c_del_adapter(&chan->adapter); kfree(chan); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f073ed8432e..8df02ef8926 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -36,9 +36,25 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include <linux/acpi.h> #define I915_LVDS "i915_lvds" +/* + * the following four scaling options are defined. + * #define DRM_MODE_SCALE_NON_GPU 0 + * #define DRM_MODE_SCALE_FULLSCREEN 1 + * #define DRM_MODE_SCALE_NO_SCALE 2 + * #define DRM_MODE_SCALE_ASPECT 3 + */ + +/* Private structure for the integrated LVDS support */ +struct intel_lvds_priv { + int fitting_mode; + u32 pfit_control; + u32 pfit_pgm_ratios; +}; + /** * Sets the backlight level. * @@ -213,26 +229,45 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + /* + * float point operation is not supported . 
So the PANEL_RATIO_FACTOR + * is defined, which can avoid the float point computation when + * calculating the panel ratio. + */ +#define PANEL_RATIO_FACTOR 8192 struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + u32 pfit_control = 0, pfit_pgm_ratios = 0; + int left_border = 0, right_border = 0, top_border = 0; + int bottom_border = 0; + bool border = 0; + int panel_ratio, desired_ratio, vert_scale, horiz_scale; + int horiz_ratio, vert_ratio; + u32 hsync_width, vsync_width; + u32 hblank_width, vblank_width; + u32 hsync_pos, vsync_pos; /* Should never happen!! */ if (!IS_I965G(dev) && intel_crtc->pipe == 0) { - printk(KERN_ERR "Can't support LVDS on pipe A\n"); + DRM_ERROR("Can't support LVDS on pipe A\n"); return false; } /* Should never happen!! */ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { - printk(KERN_ERR "Can't enable LVDS and another " + DRM_ERROR("Can't enable LVDS and another " "encoder on the same pipe\n"); return false; } } - + /* If we don't have a panel mode, there is nothing we can do */ + if (dev_priv->panel_fixed_mode == NULL) + return true; /* * If we have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, @@ -256,6 +291,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); } + /* Make sure pre-965s set dither correctly */ + if (!IS_I965G(dev)) { + if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + } + + /* Native modes don't need fitting */ + if (adjusted_mode->hdisplay == mode->hdisplay && + adjusted_mode->vdisplay == mode->vdisplay) { + pfit_pgm_ratios = 0; + border = 0; + goto out; + } + + /* 965+ wants fuzzy fitting */ + if (IS_I965G(dev)) + pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | + PFIT_FILTER_FUZZY; + + hsync_width = adjusted_mode->crtc_hsync_end - + adjusted_mode->crtc_hsync_start; + vsync_width = adjusted_mode->crtc_vsync_end - + adjusted_mode->crtc_vsync_start; + hblank_width = adjusted_mode->crtc_hblank_end - + adjusted_mode->crtc_hblank_start; + vblank_width = adjusted_mode->crtc_vblank_end - + adjusted_mode->crtc_vblank_start; + /* + * Deal with panel fitting options. Figure out how to stretch the + * image based on its aspect ratio & the current panel fitting mode. + */ + panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / + adjusted_mode->vdisplay; + desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / + mode->vdisplay; + /* + * Enable automatic panel scaling for non-native modes so that they fill + * the screen. Should be enabled before the pipe is enabled, according + * to register description and PRM. + * Change the value here to see the borders for debugging + */ + I915_WRITE(BCLRPAT_A, 0); + I915_WRITE(BCLRPAT_B, 0); + + switch (lvds_priv->fitting_mode) { + case DRM_MODE_SCALE_NO_SCALE: + /* + * For centered modes, we have to calculate border widths & + * heights and modify the values programmed into the CRTC. 
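The PANEL_RATIO_FACTOR comment above describes avoiding floating point by scaling both aspect ratios by 8192 before comparing them. A small self-contained example (made-up resolutions, plain userspace C) shows the same comparison:

    #include <stdio.h>

    /* Sketch of the fixed-point aspect-ratio comparison used above:
     * ratios are scaled by 8192 so they can be compared with integer math. */
    #define PANEL_RATIO_FACTOR 8192

    int main(void)
    {
            int panel_w = 1280, panel_h = 800;      /* assumed native panel mode */
            int mode_w  = 1024, mode_h  = 768;      /* assumed requested mode */

            int panel_ratio   = panel_w * PANEL_RATIO_FACTOR / panel_h;  /* 13107 */
            int desired_ratio = mode_w  * PANEL_RATIO_FACTOR / mode_h;   /* 10922 */

            if (panel_ratio > desired_ratio)
                    printf("pillarbox: panel is wider than the requested mode\n");
            else if (panel_ratio < desired_ratio)
                    printf("letterbox: panel is narrower than the requested mode\n");
            else
                    printf("aspect ratios match\n");
            return 0;
    }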
+ */ + left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; + right_border = left_border; + if (mode->hdisplay & 1) + right_border++; + top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; + bottom_border = top_border; + if (mode->vdisplay & 1) + bottom_border++; + /* Set active & border values */ + adjusted_mode->crtc_hdisplay = mode->hdisplay; + /* Keep the boder be even */ + if (right_border & 1) + right_border++; + /* use the border directly instead of border minuse one */ + adjusted_mode->crtc_hblank_start = mode->hdisplay + + right_border; + /* keep the blank width constant */ + adjusted_mode->crtc_hblank_end = + adjusted_mode->crtc_hblank_start + hblank_width; + /* get the hsync pos relative to hblank start */ + hsync_pos = (hblank_width - hsync_width) / 2; + /* keep the hsync pos be even */ + if (hsync_pos & 1) + hsync_pos++; + adjusted_mode->crtc_hsync_start = + adjusted_mode->crtc_hblank_start + hsync_pos; + /* keep the hsync width constant */ + adjusted_mode->crtc_hsync_end = + adjusted_mode->crtc_hsync_start + hsync_width; + adjusted_mode->crtc_vdisplay = mode->vdisplay; + /* use the border instead of border minus one */ + adjusted_mode->crtc_vblank_start = mode->vdisplay + + bottom_border; + /* keep the vblank width constant */ + adjusted_mode->crtc_vblank_end = + adjusted_mode->crtc_vblank_start + vblank_width; + /* get the vsync start postion relative to vblank start */ + vsync_pos = (vblank_width - vsync_width) / 2; + adjusted_mode->crtc_vsync_start = + adjusted_mode->crtc_vblank_start + vsync_pos; + /* keep the vsync width constant */ + adjusted_mode->crtc_vsync_end = + adjusted_mode->crtc_vblank_start + vsync_width; + border = 1; + break; + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the spect ratio */ + pfit_control |= PFIT_ENABLE; + if (IS_I965G(dev)) { + /* 965+ is easy, it does everything in hw */ + if (panel_ratio > desired_ratio) + pfit_control |= PFIT_SCALING_PILLAR; + else if (panel_ratio < desired_ratio) + pfit_control |= PFIT_SCALING_LETTER; + else + pfit_control |= PFIT_SCALING_AUTO; + } else { + /* + * For earlier chips we have to calculate the scaling + * ratio by hand and program it into the + * PFIT_PGM_RATIO register + */ + u32 horiz_bits, vert_bits, bits = 12; + horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ + adjusted_mode->hdisplay; + vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ + adjusted_mode->vdisplay; + horiz_scale = adjusted_mode->hdisplay * + PANEL_RATIO_FACTOR / mode->hdisplay; + vert_scale = adjusted_mode->vdisplay * + PANEL_RATIO_FACTOR / mode->vdisplay; + + /* retain aspect ratio */ + if (panel_ratio > desired_ratio) { /* Pillar */ + u32 scaled_width; + scaled_width = mode->hdisplay * vert_scale / + PANEL_RATIO_FACTOR; + horiz_ratio = vert_ratio; + pfit_control |= (VERT_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + /* Pillar will have left/right borders */ + left_border = (adjusted_mode->hdisplay - + scaled_width) / 2; + right_border = left_border; + if (mode->hdisplay & 1) /* odd resolutions */ + right_border++; + /* keep the border be even */ + if (right_border & 1) + right_border++; + adjusted_mode->crtc_hdisplay = scaled_width; + /* use border instead of border minus one */ + adjusted_mode->crtc_hblank_start = + scaled_width + right_border; + /* keep the hblank width constant */ + adjusted_mode->crtc_hblank_end = + adjusted_mode->crtc_hblank_start + + hblank_width; + /* + * get the hsync start pos relative to + * hblank start + */ + hsync_pos = (hblank_width - hsync_width) / 2; + /* 
keep the hsync_pos be even */ + if (hsync_pos & 1) + hsync_pos++; + adjusted_mode->crtc_hsync_start = + adjusted_mode->crtc_hblank_start + + hsync_pos; + /* keept hsync width constant */ + adjusted_mode->crtc_hsync_end = + adjusted_mode->crtc_hsync_start + + hsync_width; + border = 1; + } else if (panel_ratio < desired_ratio) { /* letter */ + u32 scaled_height = mode->vdisplay * + horiz_scale / PANEL_RATIO_FACTOR; + vert_ratio = horiz_ratio; + pfit_control |= (HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + /* Letterbox will have top/bottom border */ + top_border = (adjusted_mode->vdisplay - + scaled_height) / 2; + bottom_border = top_border; + if (mode->vdisplay & 1) + bottom_border++; + adjusted_mode->crtc_vdisplay = scaled_height; + /* use border instead of border minus one */ + adjusted_mode->crtc_vblank_start = + scaled_height + bottom_border; + /* keep the vblank width constant */ + adjusted_mode->crtc_vblank_end = + adjusted_mode->crtc_vblank_start + + vblank_width; + /* + * get the vsync start pos relative to + * vblank start + */ + vsync_pos = (vblank_width - vsync_width) / 2; + adjusted_mode->crtc_vsync_start = + adjusted_mode->crtc_vblank_start + + vsync_pos; + /* keep the vsync width constant */ + adjusted_mode->crtc_vsync_end = + adjusted_mode->crtc_vsync_start + + vsync_width; + border = 1; + } else { + /* Aspects match, Let hw scale both directions */ + pfit_control |= (VERT_AUTO_SCALE | + HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + horiz_bits = (1 << bits) * horiz_ratio / + PANEL_RATIO_FACTOR; + vert_bits = (1 << bits) * vert_ratio / + PANEL_RATIO_FACTOR; + pfit_pgm_ratios = + ((vert_bits << PFIT_VERT_SCALE_SHIFT) & + PFIT_VERT_SCALE_MASK) | + ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & + PFIT_HORIZ_SCALE_MASK); + } + break; + + case DRM_MODE_SCALE_FULLSCREEN: + /* + * Full scaling, even if it changes the aspect ratio. + * Fortunately this is all done for us in hw. + */ + pfit_control |= PFIT_ENABLE; + if (IS_I965G(dev)) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + break; + default: + break; + } + +out: + lvds_priv->pfit_control = pfit_control; + lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the @@ -301,8 +573,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - u32 pfit_control; + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; /* * The LVDS pin pair will already have been turned on in the @@ -319,22 +591,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. 
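One detail of the aspect-scaling branch above that is easy to misread is how the 12-bit values written into pfit_pgm_ratios relate to PANEL_RATIO_FACTOR. A worked example with assumed resolutions (1024x768 letterboxed onto a 1280x1024 panel), following the integer math in the code:

    /*
     * Worked example (assumed resolutions), mirroring the integer math above:
     *   horiz_ratio = 1024 * 8192 / 1280        = 6553   (about 0.80 * 8192)
     *   horiz_bits  = (1 << 12) * 6553 / 8192   = 3276   (about 0.80 in 0.12 fixed point)
     * In the letterbox branch vert_ratio is set equal to horiz_ratio, so the
     * same 12-bit value lands in both the horizontal and vertical scale fields
     * of pfit_pgm_ratios.
     */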
*/ - if (mode->hdisplay != adjusted_mode->hdisplay || - mode->vdisplay != adjusted_mode->vdisplay) - pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | - HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - else - pfit_control = 0; - - if (!IS_I965G(dev)) { - if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; - } - else - pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; - - I915_WRITE(PFIT_CONTROL, pfit_control); + I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); } /** @@ -406,6 +664,34 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct drm_device *dev = connector->dev; + struct intel_output *intel_output = + to_intel_output(connector); + + if (property == dev->mode_config.scaling_mode_property && + connector->encoder) { + struct drm_crtc *crtc = connector->encoder->crtc; + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + if (value == DRM_MODE_SCALE_NON_GPU) { + DRM_DEBUG_KMS(I915_LVDS, + "non_GPU property is unsupported\n"); + return 0; + } + if (lvds_priv->fitting_mode == value) { + /* the LVDS scaling property is not changed */ + return 0; + } + lvds_priv->fitting_mode = value; + if (crtc && crtc->enabled) { + /* + * If the CRTC is enabled, the display will be changed + * according to the new panel fitting mode. + */ + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); + } + } + return 0; } @@ -456,7 +742,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core series)", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), }, }, @@ -464,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core 2 series)", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), }, }, @@ -494,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen Mini PC MP915", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), @@ -503,6 +797,65 @@ static const struct dmi_system_id intel_no_lvds[] = { { } /* terminating entry */ }; +#ifdef CONFIG_ACPI +/* + * check_lid_device -- check whether @handle is an ACPI LID device. + * @handle: ACPI device handle + * @level : depth in the ACPI namespace tree + * @context: the number of LID device when we find the device + * @rv: a return value to fill if desired (Not use) + */ +static acpi_status +check_lid_device(acpi_handle handle, u32 level, void *context, + void **return_value) +{ + struct acpi_device *acpi_dev; + int *lid_present = context; + + acpi_dev = NULL; + /* Get the acpi device for device handle */ + if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { + /* If there is no ACPI device for handle, return */ + return AE_OK; + } + + if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) + *lid_present = 1; + + return AE_OK; +} + +/** + * check whether there exists the ACPI LID device by enumerating the ACPI + * device tree. 
+ */ +static int intel_lid_present(void) +{ + int lid_present = 0; + + if (acpi_disabled) { + /* If ACPI is disabled, there is no ACPI device tree to + * check, so assume the LID device would have been present. + */ + return 1; + } + + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, + check_lid_device, &lid_present, NULL); + + return lid_present; +} +#else +static int intel_lid_present(void) +{ + /* In the absence of ACPI built in, assume that the LID device would + * have been present. + */ + return 1; +} +#endif + /** * intel_lvds_init - setup LVDS connectors on this device * @dev: drm device @@ -518,6 +871,7 @@ void intel_lvds_init(struct drm_device *dev) struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; + struct intel_lvds_priv *lvds_priv; u32 lvds; int pipe, gpio = GPIOC; @@ -525,13 +879,28 @@ void intel_lvds_init(struct drm_device *dev) if (dmi_check_system(intel_no_lvds)) return; + /* Assume that any device without an ACPI LID device also doesn't + * have an integrated LVDS. We would be better off parsing the BIOS + * to get a reliable indicator, but that code isn't written yet. + * + * In the case of all-in-one desktops using LVDS that we've seen, + * they're using SDVO LVDS. + */ + if (!intel_lid_present()) + return; + if (IS_IGDNG(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return; + if (dev_priv->edp_support) { + DRM_DEBUG("disable LVDS for eDP support\n"); + return; + } gpio = PCH_GPIOC; } - intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); + intel_output = kzalloc(sizeof(struct intel_output) + + sizeof(struct intel_lvds_priv), GFP_KERNEL); if (!intel_output) { return; } @@ -547,13 +916,26 @@ void intel_lvds_init(struct drm_device *dev) drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); intel_output->type = INTEL_OUTPUT_LVDS; + intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); + intel_output->crtc_mask = (1 << 1); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; + lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); + intel_output->dev_priv = lvds_priv; + /* create the scaling mode property */ + drm_mode_create_scaling_mode_property(dev); + /* + * the initial panel fitting mode will be FULL_SCREEN. 
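The LVDS init above (like the DP and SDVO paths elsewhere in this series) allocates struct intel_output and its private data in one zeroed allocation, then points dev_priv just past the base structure. A generic standalone sketch of that idiom (illustrative names, calloc standing in for kzalloc) is:

    #include <stdlib.h>

    /* Illustrative stand-ins for the driver structures. */
    struct base_output  { int type; void *dev_priv; };
    struct lvds_private { int fitting_mode; };

    static struct base_output *alloc_output_with_priv(void)
    {
            struct base_output *out;

            /* one zeroed allocation covering both structures */
            out = calloc(1, sizeof(struct base_output) +
                            sizeof(struct lvds_private));
            if (!out)
                    return NULL;

            /* the private area starts immediately after the base structure */
            out->dev_priv = (struct lvds_private *)(out + 1);
            return out;
    }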
+ */ + drm_connector_attach_property(&intel_output->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); + lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; /* * LVDS discovery: * 1) check for EDID on DDC @@ -649,5 +1031,5 @@ failed: if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); drm_connector_cleanup(connector); - kfree(connector); + kfree(intel_output); } diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index e0910fefce8..67e2f4632a2 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); - ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); - + intel_i2c_quirk_set(intel_output->base.dev, true); + ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); + intel_i2c_quirk_set(intel_output->base.dev, false); if (ret == 2) return true; @@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output) struct edid *edid; int ret = 0; - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); - edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); + intel_i2c_quirk_set(intel_output->base.dev, true); + edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); + intel_i2c_quirk_set(intel_output->base.dev, false); if (edid) { drm_mode_connector_update_edid_property(&intel_output->base, edid); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9a00adb3a50..d3b74ba62b4 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -31,6 +31,7 @@ #include "drm.h" #include "drm_crtc.h" #include "intel_drv.h" +#include "drm_edid.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" @@ -38,8 +39,7 @@ #undef SDVO_DEBUG #define I915_SDVO "i915_sdvo" struct intel_sdvo_priv { - struct intel_i2c_chan *i2c_bus; - int slaveaddr; + u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ int output_device; @@ -56,6 +56,12 @@ struct intel_sdvo_priv { /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; + /* + * For multiple function SDVO device, + * this is for current attached outputs. + */ + uint16_t attached_output; + /** * This is set if we're going to treat the device as TV-out. * @@ -69,12 +75,23 @@ struct intel_sdvo_priv { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; + /** * This is set if we detect output of sdvo device as LVDS. */ bool is_lvds; /** + * This is sdvo flags for input timing. + */ + uint8_t sdvo_flags; + + /** + * This is sdvo fixed pannel mode pointer + */ + struct drm_display_mode *sdvo_lvds_fixed_mode; + + /** * Returned SDTV resolutions allowed for the current format, if the * device reported it. 
*/ @@ -104,6 +121,9 @@ struct intel_sdvo_priv { u32 save_SDVOX; }; +static bool +intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); + /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to @@ -146,13 +166,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, struct i2c_msg msgs[] = { { - .addr = sdvo_priv->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = sdvo_priv->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = I2C_M_RD, .len = 1, .buf = buf, @@ -162,7 +182,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, out_buf[0] = addr; out_buf[1] = 0; - if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -175,10 +195,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, u8 ch) { + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; u8 out_buf[2]; struct i2c_msg msgs[] = { { - .addr = intel_output->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = 0, .len = 2, .buf = out_buf, @@ -188,7 +209,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) + if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) { return true; } @@ -592,6 +613,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, uint16_t height) { struct intel_sdvo_preferred_input_timing_args args; + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; uint8_t status; memset(&args, 0, sizeof(args)); @@ -599,7 +621,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, args.width = width; args.height = height; args.interlace = 0; - args.scaled = 0; + + if (sdvo_priv->is_lvds && + (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || + sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) + args.scaled = 1; + intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); status = intel_sdvo_read_response(output, NULL, 0); @@ -944,12 +971,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct intel_output *output = enc_to_intel_output(encoder); struct intel_sdvo_priv *dev_priv = output->dev_priv; - if (!dev_priv->is_tv) { - /* Make the CRTC code factor in the SDVO pixel multiplier. The - * SDVO device will be told of the multiplier during mode_set. 
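The slave_addr >> 1 in the i2c_msg setup above is an address-format conversion: the SDVO code keeps the 8-bit style address (0x70/0x72, as returned by intel_sdvo_get_slave_addr), while struct i2c_msg expects the 7-bit address. A one-line sketch with the shift spelled out:

    #include <stdint.h>

    /* 0x70 >> 1 == 0x38, 0x72 >> 1 == 0x39: drop the R/W bit position to get
     * the 7-bit address expected by the Linux i2c core. */
    static uint16_t sdvo_to_i2c_addr(uint8_t slave_addr)
    {
            return slave_addr >> 1;
    }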
- */ - adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); - } else { + if (dev_priv->is_tv) { struct intel_sdvo_dtd output_dtd; bool success; @@ -980,6 +1002,47 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, intel_sdvo_get_preferred_input_timing(output, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; + + drm_mode_set_crtcinfo(adjusted_mode, 0); + + mode->clock = adjusted_mode->clock; + + adjusted_mode->clock *= + intel_sdvo_get_pixel_multiplier(mode); + } else { + return false; + } + } else if (dev_priv->is_lvds) { + struct intel_sdvo_dtd output_dtd; + bool success; + + drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0); + /* Set output timings */ + intel_sdvo_get_dtd_from_mode(&output_dtd, + dev_priv->sdvo_lvds_fixed_mode); + + intel_sdvo_set_target_output(output, + dev_priv->controlled_output); + intel_sdvo_set_output_timing(output, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. */ + intel_sdvo_set_target_input(output, true, false); + + + success = intel_sdvo_create_preferred_input_timing( + output, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay); + + if (success) { + struct intel_sdvo_dtd input_dtd; + + intel_sdvo_get_preferred_input_timing(output, + &input_dtd); + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; drm_mode_set_crtcinfo(adjusted_mode, 0); @@ -990,6 +1053,12 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, } else { return false; } + + } else { + /* Make the CRTC code factor in the SDVO pixel multiplier. The + * SDVO device will be told of the multiplier during mode_set. + */ + adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); } return true; } @@ -1033,15 +1102,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* We have tried to get input timing in mode_fixup, and filled into adjusted_mode */ - if (sdvo_priv->is_tv) + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - else + input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; + } else intel_sdvo_get_dtd_from_mode(&input_dtd, mode); /* If it's a TV, we already set the output timing in mode_fixup. * Otherwise, the output timing is equal to the input timing. 
*/ - if (!sdvo_priv->is_tv) { + if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { /* Set the output timing to the screen */ intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); @@ -1116,6 +1186,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; } + if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) + sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(output, sdvox); } @@ -1276,6 +1348,17 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, if (sdvo_priv->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; + if (sdvo_priv->is_lvds == true) { + if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) + return MODE_PANEL; + + if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay) + return MODE_PANEL; + } + return MODE_OK; } @@ -1362,42 +1445,96 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) intel_sdvo_read_response(intel_output, &response, 2); } -static void -intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) +static bool +intel_sdvo_multifunc_encoder(struct intel_output *intel_output) +{ + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + int caps = 0; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) + caps++; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) + caps++; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) + caps++; + + return (caps > 1); +} + +enum drm_connector_status +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) { struct intel_output *intel_output = to_intel_output(connector); struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); + intel_output->ddc_bus); if (edid != NULL) { - sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + /* Don't report the output as connected if it's a DVI-I + * connector with a non-digital EDID coming out. 
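intel_sdvo_multifunc_encoder() above counts how many output groups the encoder advertises and treats more than one as multifunction. The same check written table-driven (a sketch, assuming the SDVO_OUTPUT_* masks from intel_sdvo_regs.h) looks like:

    #include <stdint.h>
    #include "intel_sdvo_regs.h"   /* SDVO_OUTPUT_* masks */

    static int sdvo_is_multifunc(uint16_t output_flags)
    {
            static const uint16_t groups[] = {
                    SDVO_OUTPUT_TMDS0  | SDVO_OUTPUT_TMDS1,
                    SDVO_OUTPUT_RGB0   | SDVO_OUTPUT_RGB1,
                    SDVO_OUTPUT_SVID0  | SDVO_OUTPUT_SVID1,
                    SDVO_OUTPUT_CVBS0  | SDVO_OUTPUT_CVBS1,
                    SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1,
                    SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1,
                    SDVO_OUTPUT_LVDS0  | SDVO_OUTPUT_LVDS1,
            };
            int i, caps = 0;

            for (i = 0; i < (int)(sizeof(groups) / sizeof(groups[0])); i++)
                    if (output_flags & groups[i])
                            caps++;
            return caps > 1;        /* more than one group => multifunction */
    }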
+ */ + if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) + sdvo_priv->is_hdmi = + drm_detect_hdmi_monitor(edid); + else + status = connector_status_disconnected; + } + kfree(edid); intel_output->base.display_info.raw_edid = NULL; - } + + } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + status = connector_status_disconnected; + + return status; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { - u8 response[2]; + uint16_t response; u8 status; struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); status = intel_sdvo_read_response(intel_output, &response, 2); - DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); + DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8); if (status != SDVO_CMD_STATUS_SUCCESS) return connector_status_unknown; - if ((response[0] != 0) || (response[1] != 0)) { - intel_sdvo_hdmi_sink_detect(connector); - return connector_status_connected; - } else + if (response == 0) return connector_status_disconnected; + + if (intel_sdvo_multifunc_encoder(intel_output) && + sdvo_priv->attached_output != response) { + if (sdvo_priv->controlled_output != response && + intel_sdvo_output_setup(intel_output, response) != true) + return connector_status_unknown; + sdvo_priv->attached_output = response; + } + return intel_sdvo_hdmi_sink_detect(connector, response); } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) @@ -1549,23 +1686,21 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_display_mode *newmode; /* * Attempt to get the mode list from DDC. * Assume that the preferred modes are * arranged in priority order. 
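In intel_sdvo_detect() above, the 16-bit GET_ATTACHED_DISPLAYS response is treated as a plain bitmask of SDVO_OUTPUT_* bits (the debug print simply splits it into low and high bytes). For example, a hypothetical helper testing for an attached TMDS sink would be:

    #include <stdint.h>
    #include "intel_sdvo_regs.h"

    static int sdvo_tmds_attached(uint16_t response)
    {
            return (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) != 0;
    }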
*/ - /* set the bus switch and get the modes */ - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); intel_ddc_get_modes(intel_output); if (list_empty(&connector->probed_modes) == false) - return; + goto end; /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { - struct drm_display_mode *newmode; newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); if (newmode != NULL) { @@ -1575,6 +1710,16 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) drm_mode_probed_add(connector, newmode); } } + +end: + list_for_each_entry(newmode, &connector->probed_modes, head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + sdvo_priv->sdvo_lvds_fixed_mode = + drm_mode_duplicate(connector->dev, newmode); + break; + } + } + } static int intel_sdvo_get_modes(struct drm_connector *connector) @@ -1597,14 +1742,20 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; if (intel_output->i2c_bus) intel_i2c_destroy(intel_output->i2c_bus); if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); + if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) + drm_mode_destroy(connector->dev, + sdvo_priv->sdvo_lvds_fixed_mode); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); + kfree(intel_output); } @@ -1709,7 +1860,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (to_intel_output(connector)->ddc_bus == chan) { + if (to_intel_output(connector)->ddc_bus == &chan->adapter) { intel_output = to_intel_output(connector); break; } @@ -1723,7 +1874,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; struct i2c_algo_bit_data *algo_data; - struct i2c_algorithm *algo; + const struct i2c_algorithm *algo; algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; intel_output = @@ -1733,7 +1884,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, return -EINVAL; sdvo_priv = intel_output->dev_priv; - algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; + algo = intel_output->i2c_bus->algo; intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); return algo->master_xfer(i2c_adap, msgs, num); @@ -1780,18 +1931,112 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) return 0x72; } +static bool +intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) +{ + struct drm_connector *connector = &intel_output->base; + struct drm_encoder *encoder = &intel_output->enc; + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + bool ret = true, registered = false; + + sdvo_priv->is_tv = false; + intel_output->needs_tv_clock = false; + sdvo_priv->is_lvds = false; + + if (device_is_registered(&connector->kdev)) { + drm_sysfs_connector_remove(connector); + registered = true; + } + + if (flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { + if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) + sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; + else + sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; + + encoder->encoder_type = DRM_MODE_ENCODER_TMDS; + connector->connector_type = DRM_MODE_CONNECTOR_DVID; + + if (intel_sdvo_get_supp_encode(intel_output, + &sdvo_priv->encode) 
&& + intel_sdvo_get_digital_encoding_mode(intel_output) && + sdvo_priv->is_hdmi) { + /* enable hdmi encoding mode if supported */ + intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_output, + SDVO_COLORIMETRY_RGB256); + connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + intel_output->clone_mask = + (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); + } + } else if (flags & SDVO_OUTPUT_SVID0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + sdvo_priv->is_tv = true; + intel_output->needs_tv_clock = true; + intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + } else if (flags & SDVO_OUTPUT_RGB0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); + } else if (flags & SDVO_OUTPUT_RGB1) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + } else if (flags & SDVO_OUTPUT_LVDS0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; + intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); + } else if (flags & SDVO_OUTPUT_LVDS1) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; + intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); + } else { + + unsigned char bytes[2]; + + sdvo_priv->controlled_output = 0; + memcpy(bytes, &sdvo_priv->caps.output_flags, 2); + DRM_DEBUG_KMS(I915_SDVO, + "%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(sdvo_priv), + bytes[0], bytes[1]); + ret = false; + } + intel_output->crtc_mask = (1 << 0) | (1 << 1); + + if (ret && registered) + ret = drm_sysfs_connector_add(connector) == 0 ? true : false; + + + return ret; + +} + bool intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; - struct intel_i2c_chan *i2cbus = NULL; - struct intel_i2c_chan *ddcbus = NULL; - int connector_type; + u8 ch[0x40]; int i; - int encoder_type, output_id; - u8 slave_addr; intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); if (!intel_output) { @@ -1799,29 +2044,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) } sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); + sdvo_priv->output_device = output_device; + + intel_output->dev_priv = sdvo_priv; intel_output->type = INTEL_OUTPUT_SDVO; /* setup the DDC bus. 
*/ if (output_device == SDVOB) - i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); + intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); else - i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); + intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); - if (!i2cbus) + if (!intel_output->i2c_bus) goto err_inteloutput; - slave_addr = intel_sdvo_get_slave_addr(dev, output_device); - sdvo_priv->i2c_bus = i2cbus; + sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); - if (output_device == SDVOB) { - output_id = 1; - } else { - output_id = 2; - } - sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1; - sdvo_priv->output_device = output_device; - intel_output->i2c_bus = i2cbus; - intel_output->dev_priv = sdvo_priv; + /* Save the bit-banging i2c functionality for use by the DDC wrapper */ + intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { @@ -1835,101 +2075,39 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) /* setup the DDC bus. */ if (output_device == SDVOB) - ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); else - ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); - if (ddcbus == NULL) + if (intel_output->ddc_bus == NULL) goto err_i2c; - intel_sdvo_i2c_bit_algo.functionality = - intel_output->i2c_bus->adapter.algo->functionality; - ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo; - intel_output->ddc_bus = ddcbus; + /* Wrap with our custom algo which switches to DDC mode */ + intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; /* In defaut case sdvo lvds is false */ - sdvo_priv->is_lvds = false; intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); - if (sdvo_priv->caps.output_flags & - (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { - if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; - else - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; - - encoder_type = DRM_MODE_ENCODER_TMDS; - connector_type = DRM_MODE_CONNECTOR_DVID; - - if (intel_sdvo_get_supp_encode(intel_output, - &sdvo_priv->encode) && - intel_sdvo_get_digital_encoding_mode(intel_output) && - sdvo_priv->is_hdmi) { - /* enable hdmi encoding mode if supported */ - intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); - intel_sdvo_set_colorimetry(intel_output, - SDVO_COLORIMETRY_RGB256); - connector_type = DRM_MODE_CONNECTOR_HDMIA; - } - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; - encoder_type = DRM_MODE_ENCODER_TVDAC; - connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; - encoder_type = DRM_MODE_ENCODER_DAC; - connector_type = DRM_MODE_CONNECTOR_VGA; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; - encoder_type = DRM_MODE_ENCODER_DAC; - connector_type = DRM_MODE_CONNECTOR_VGA; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; - encoder_type = DRM_MODE_ENCODER_LVDS; - connector_type = 
DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; - encoder_type = DRM_MODE_ENCODER_LVDS; - connector_type = DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - } - else - { - unsigned char bytes[2]; - - sdvo_priv->controlled_output = 0; - memcpy (bytes, &sdvo_priv->caps.output_flags, 2); - DRM_DEBUG_KMS(I915_SDVO, - "%s: Unknown SDVO output type (0x%02x%02x)\n", - SDVO_NAME(sdvo_priv), - bytes[0], bytes[1]); - encoder_type = DRM_MODE_ENCODER_NONE; - connector_type = DRM_MODE_CONNECTOR_Unknown; + if (intel_sdvo_output_setup(intel_output, + sdvo_priv->caps.output_flags) != true) { + DRM_DEBUG("SDVO output failed to setup on SDVO%c\n", + output_device == SDVOB ? 'B' : 'C'); goto err_i2c; } + connector = &intel_output->base; drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, - connector_type); + connector->connector_type); + drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; connector->display_info.subpixel_order = SubPixelHorizontalRGB; - drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); + drm_encoder_init(dev, &intel_output->enc, + &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); + drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); @@ -1965,9 +2143,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) return true; err_i2c: - if (ddcbus != NULL) + if (intel_output->ddc_bus != NULL) intel_i2c_destroy(intel_output->ddc_bus); - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_output->i2c_bus != NULL) + intel_i2c_destroy(intel_output->i2c_bus); err_inteloutput: kfree(intel_output); diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 193938b7d7f..ba5cdf8ae40 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -715,6 +715,7 @@ struct intel_sdvo_enhancements_arg { #define SDVO_HBUF_TX_ONCE (2 << 6) #define SDVO_HBUF_TX_VSYNC (3 << 6) #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c +#define SDVO_NEED_TO_STALL (1 << 7) struct intel_sdvo_encode{ u8 dvi_rev; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ea68992e441..5b1c9e9fdba 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) /* * Detect TV by polling) */ - if (intel_output->load_detect_temp) { - /* TV not currently running, prod it with destructive detect */ - save_tv_dac = tv_dac; - tv_ctl = I915_READ(TV_CTL); - save_tv_ctl = tv_ctl; - tv_ctl &= ~TV_ENC_ENABLE; - tv_ctl &= ~TV_TEST_MODE_MASK; - tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - tv_dac &= ~TVDAC_SENSE_MASK; - tv_dac &= ~DAC_A_MASK; - tv_dac &= ~DAC_B_MASK; - tv_dac &= ~DAC_C_MASK; - tv_dac |= (TVDAC_STATE_CHG_EN | - TVDAC_A_SENSE_CTL | - TVDAC_B_SENSE_CTL | - TVDAC_C_SENSE_CTL | - DAC_CTL_OVERRIDE | - DAC_A_0_7_V | - DAC_B_0_7_V | - DAC_C_0_7_V); - I915_WRITE(TV_CTL, tv_ctl); - I915_WRITE(TV_DAC, tv_dac); - intel_wait_for_vblank(dev); - tv_dac = I915_READ(TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac); - I915_WRITE(TV_CTL, save_tv_ctl); - intel_wait_for_vblank(dev); - } + save_tv_dac = tv_dac; + tv_ctl = I915_READ(TV_CTL); + save_tv_ctl = tv_ctl; + tv_ctl &= 
~TV_ENC_ENABLE; + tv_ctl &= ~TV_TEST_MODE_MASK; + tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; + tv_dac &= ~TVDAC_SENSE_MASK; + tv_dac &= ~DAC_A_MASK; + tv_dac &= ~DAC_B_MASK; + tv_dac &= ~DAC_C_MASK; + tv_dac |= (TVDAC_STATE_CHG_EN | + TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | + TVDAC_C_SENSE_CTL | + DAC_CTL_OVERRIDE | + DAC_A_0_7_V | + DAC_B_0_7_V | + DAC_C_0_7_V); + I915_WRITE(TV_CTL, tv_ctl); + I915_WRITE(TV_DAC, tv_dac); + intel_wait_for_vblank(dev); + tv_dac = I915_READ(TV_DAC); + I915_WRITE(TV_DAC, save_tv_dac); + I915_WRITE(TV_CTL, save_tv_ctl); + intel_wait_for_vblank(dev); /* * A B C * 0 1 1 Composite @@ -1493,6 +1490,27 @@ static struct input_res { {"1920x1080", 1920, 1080}, }; +/* + * Chose preferred mode according to line number of TV format + */ +static void +intel_tv_chose_preferred_modes(struct drm_connector *connector, + struct drm_display_mode *mode_ptr) +{ + struct intel_output *intel_output = to_intel_output(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + + if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + else if (tv_mode->nbr_end > 480) { + if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { + if (mode_ptr->vdisplay == 720) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } else if (mode_ptr->vdisplay == 1080) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } +} + /** * Stub get_modes function. * @@ -1547,6 +1565,7 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr->clock = (int) tmp; mode_ptr->type = DRM_MODE_TYPE_DRIVER; + intel_tv_chose_preferred_modes(connector, mode_ptr); drm_mode_probed_add(connector, mode_ptr); count++; } @@ -1699,6 +1718,7 @@ intel_tv_init(struct drm_device *dev) if (!intel_output) { return; } + connector = &intel_output->base; drm_connector_init(dev, connector, &intel_tv_connector_funcs, @@ -1710,6 +1730,8 @@ intel_tv_init(struct drm_device *dev) drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); tv_priv = (struct intel_tv_priv *)(intel_output + 1); intel_output->type = INTEL_OUTPUT_TVOUT; + intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_output->dev_priv = tv_priv; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 5fae1e074b4..013d3805994 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \ radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ - rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o + rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ + radeon_test.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c0080cc9bf8..74d034f77c6 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -31,6 +31,132 @@ #include "atom.h" #include "atom-bits.h" +static void atombios_overscan_setup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct 
radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + SET_CRTC_OVERSCAN_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); + int a1, a2; + + memset(&args, 0, sizeof(args)); + + args.usOverscanRight = 0; + args.usOverscanLeft = 0; + args.usOverscanBottom = 0; + args.usOverscanTop = 0; + args.ucCRTC = radeon_crtc->crtc_id; + + switch (radeon_crtc->rmx_type) { + case RMX_CENTER: + args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; + args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; + args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + case RMX_ASPECT: + a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; + a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; + + if (a1 > a2) { + args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; + args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; + } else if (a2 > a1) { + args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; + args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + case RMX_FULL: + default: + args.usOverscanRight = 0; + args.usOverscanLeft = 0; + args.usOverscanBottom = 0; + args.usOverscanTop = 0; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + } +} + +static void atombios_scaler_setup(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + ENABLE_SCALER_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); + /* fixme - fill in enc_priv for atom dac */ + enum radeon_tv_std tv_std = TV_STD_NTSC; + + if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) + return; + + memset(&args, 0, sizeof(args)); + + args.ucScaler = radeon_crtc->crtc_id; + + if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) { + switch (tv_std) { + case TV_STD_NTSC: + default: + args.ucTVStandard = ATOM_TV_NTSC; + break; + case TV_STD_PAL: + args.ucTVStandard = ATOM_TV_PAL; + break; + case TV_STD_PAL_M: + args.ucTVStandard = ATOM_TV_PALM; + break; + case TV_STD_PAL_60: + args.ucTVStandard = ATOM_TV_PAL60; + break; + case TV_STD_NTSC_J: + args.ucTVStandard = ATOM_TV_NTSCJ; + break; + case TV_STD_SCART_PAL: + args.ucTVStandard = ATOM_TV_PAL; /* ??? 
*/ + break; + case TV_STD_SECAM: + args.ucTVStandard = ATOM_TV_SECAM; + break; + case TV_STD_PAL_CN: + args.ucTVStandard = ATOM_TV_PALCN; + break; + } + args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; + } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) { + args.ucTVStandard = ATOM_TV_CV; + args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; + } else { + switch (radeon_crtc->rmx_type) { + case RMX_FULL: + args.ucEnable = ATOM_SCALER_EXPANSION; + break; + case RMX_CENTER: + args.ucEnable = ATOM_SCALER_CENTER; + break; + case RMX_ASPECT: + args.ucEnable = ATOM_SCALER_EXPANSION; + break; + default: + if (ASIC_IS_AVIVO(rdev)) + args.ucEnable = ATOM_SCALER_DISABLE; + else + args.ucEnable = ATOM_SCALER_CENTER; + break; + } + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) + && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { + atom_rv515_force_tv_scaler(rdev); + } +} + static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) if (ASIC_IS_AVIVO(rdev)) { uint32_t ss_cntl; + if ((rdev->family == CHIP_RS600) || + (rdev->family == CHIP_RS690) || + (rdev->family == CHIP_RS740)) + pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | + RADEON_PLL_PREFER_CLOSEST_LOWER); + if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else @@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_gem_object *obj; struct drm_radeon_gem_object *obj_priv; uint64_t fb_location; - uint32_t fb_format, fb_pitch_pixels; + uint32_t fb_format, fb_pitch_pixels, tiling_flags; if (!crtc->fb) return -EINVAL; @@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - /* TODO tiling */ + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; + + if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= AVIVO_D1GRPH_TILED; + if (radeon_crtc->crtc_id == 0) WREG32(AVIVO_D1VGA_CONTROL, 0); else @@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, radeon_crtc_set_base(crtc, x, y, old_fb); radeon_legacy_atom_set_surface(crtc); } + atombios_overscan_setup(crtc, mode, adjusted_mode); + atombios_scaler_setup(crtc); + radeon_bandwidth_update(rdev); return 0; } @@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; return true; } @@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev, AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); } - -void radeon_init_disp_bw_avivo(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2) -{ - struct radeon_device *rdev = dev->dev_private; - fixed20_12 min_mem_eff; - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; - fixed20_12 sclk_ff, mclk_ff; - uint32_t dc_lb_memory_split, temp; - - min_mem_eff.full = rfixed_const_8(0); - if (rdev->disp_priority == 2) { - uint32_t mc_init_misc_lat_timer = 0; - if (rdev->family == 
CHIP_RV515) - mc_init_misc_lat_timer = - RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER); - else if (rdev->family == CHIP_RS690) - mc_init_misc_lat_timer = - RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER); - - mc_init_misc_lat_timer &= - ~(R300_MC_DISP1R_INIT_LAT_MASK << - R300_MC_DISP1R_INIT_LAT_SHIFT); - mc_init_misc_lat_timer &= - ~(R300_MC_DISP0R_INIT_LAT_MASK << - R300_MC_DISP0R_INIT_LAT_SHIFT); - - if (mode2) - mc_init_misc_lat_timer |= - (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); - if (mode1) - mc_init_misc_lat_timer |= - (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); - - if (rdev->family == CHIP_RV515) - WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER, - mc_init_misc_lat_timer); - else if (rdev->family == CHIP_RS690) - WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER, - mc_init_misc_lat_timer); - } - - /* - * determine is there is enough bw for current mode - */ - temp_ff.full = rfixed_const(100); - mclk_ff.full = rfixed_const(rdev->clock.default_mclk); - mclk_ff.full = rfixed_div(mclk_ff, temp_ff); - sclk_ff.full = rfixed_const(rdev->clock.default_sclk); - sclk_ff.full = rfixed_div(sclk_ff, temp_ff); - - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); - temp_ff.full = rfixed_const(temp); - mem_bw.full = rfixed_mul(mclk_ff, temp_ff); - mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); - - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { - temp_ff.full = rfixed_const(1000); - pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = rfixed_div(pix_clk, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes1); - peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); - } - if (mode2) { - temp_ff.full = rfixed_const(1000); - pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = rfixed_div(pix_clk2, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes2); - peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); - } - - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR - ("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); - printk("peak disp bw %d, mem_bw %d\n", - rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw)); - } - - /* - * Line Buffer Setup - * There is a single line buffer shared by both display controllers. - * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display - * controllers. The paritioning can either be done manually or via one of four - * preset allocations specified in bits 1:0: - * 0 - line buffer is divided in half and shared between each display controller - * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 - * 2 - D1 gets the whole buffer - * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 - * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode. - * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits - * 14:4; D2 allocation follows D1. - */ - - /* is auto or manual better ? 
*/ - dc_lb_memory_split = - RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK; - dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; -#if 1 - /* auto */ - if (mode1 && mode2) { - if (mode1->hdisplay > mode2->hdisplay) { - if (mode1->hdisplay > 2560) - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; - else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else if (mode2->hdisplay > mode1->hdisplay) { - if (mode2->hdisplay > 2560) - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; - else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else if (mode1) { - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY; - } else if (mode2) { - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; - } -#else - /* manual */ - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; - dc_lb_memory_split &= - ~(AVIVO_DC_LB_DISP1_END_ADR_MASK << - AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - if (mode1) { - dc_lb_memory_split |= - ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK) - << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - } else if (mode2) { - dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - } -#endif - WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split); -} diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c550932a108..68e728e8be4 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); + rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); return 0; } @@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, + * if the aperture is 64MB but we have 32MB VRAM + * we report only 32MB VRAM but we have to set MC_FB_LOCATION + * to 64MB, otherwise the gpu accidentially dies */ + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); WREG32(RADEON_MC_FB_LOCATION, tmp); @@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev) r100_pci_gart_disable(rdev); /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; rdev->mc.gtt_location = 0xFFFFFFFFUL; if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); @@ -251,6 +254,72 @@ void r100_mc_fini(struct radeon_device *rdev) /* + * Interrupts + */ +int r100_irq_set(struct radeon_device *rdev) +{ + uint32_t tmp = 0; + + if (rdev->irq.sw_int) { + tmp |= RADEON_SW_INT_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0]) { + tmp |= RADEON_CRTC_VBLANK_MASK; + } + if (rdev->irq.crtc_vblank_int[1]) { + tmp |= RADEON_CRTC2_VBLANK_MASK; + } + WREG32(RADEON_GEN_INT_CNTL, tmp); + return 0; +} + +static inline uint32_t r100_irq_ack(struct radeon_device *rdev) +{ + uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); + uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | + RADEON_CRTC2_VBLANK_STAT; + + if (irqs) { + 
WREG32(RADEON_GEN_INT_STATUS, irqs); + } + return irqs & irq_mask; +} + +int r100_irq_process(struct radeon_device *rdev) +{ + uint32_t status; + + status = r100_irq_ack(rdev); + if (!status) { + return IRQ_NONE; + } + while (status) { + /* SW interrupt */ + if (status & RADEON_SW_INT_TEST) { + radeon_fence_process(rdev); + } + /* Vertical blank interrupts */ + if (status & RADEON_CRTC_VBLANK_STAT) { + drm_handle_vblank(rdev->ddev, 0); + } + if (status & RADEON_CRTC2_VBLANK_STAT) { + drm_handle_vblank(rdev->ddev, 1); + } + status = r100_irq_ack(rdev); + } + return IRQ_HANDLED; +} + +u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) +{ + if (crtc == 0) + return RREG32(RADEON_CRTC_CRNT_FRAME); + else + return RREG32(RADEON_CRTC2_CRNT_FRAME); +} + + +/* * Fence emission */ void r100_fence_ring_emit(struct radeon_device *rdev, @@ -719,13 +788,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, unsigned idx) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; - uint32_t header = ib_chunk->kdata[idx]; + uint32_t header; if (idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", idx, ib_chunk->length_dw); return -EINVAL; } + header = ib_chunk->kdata[idx]; pkt->idx = idx; pkt->type = CP_PACKET_GET_TYPE(header); pkt->count = CP_PACKET_GET_COUNT(header); @@ -753,6 +823,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, } /** + * r100_cs_packet_next_vline() - parse userspace VLINE packet + * @parser: parser structure holding parsing context. + * + * Userspace sends a special sequence for VLINE waits. + * PACKET0 - VLINE_START_END + value + * PACKET0 - WAIT_UNTIL +_value + * RELOC (P3) - crtc_id in reloc. + * + * This function parses this and relocates the VLINE START END + * and WAIT UNTIL packets to the correct crtc. + * It also detects a switched off crtc and nulls out the + * wait in that case. 
+ */ +int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) +{ + struct radeon_cs_chunk *ib_chunk; + struct drm_mode_object *obj; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + struct radeon_cs_packet p3reloc, waitreloc; + int crtc_id; + int r; + uint32_t header, h_idx, reg; + + ib_chunk = &p->chunks[p->chunk_ib_idx]; + + /* parse the wait until */ + r = r100_cs_packet_parse(p, &waitreloc, p->idx); + if (r) + return r; + + /* check its a wait until and only 1 count */ + if (waitreloc.reg != RADEON_WAIT_UNTIL || + waitreloc.count != 0) { + DRM_ERROR("vline wait had illegal wait until segment\n"); + r = -EINVAL; + return r; + } + + if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { + DRM_ERROR("vline wait had illegal wait until\n"); + r = -EINVAL; + return r; + } + + /* jump over the NOP */ + r = r100_cs_packet_parse(p, &p3reloc, p->idx); + if (r) + return r; + + h_idx = p->idx - 2; + p->idx += waitreloc.count; + p->idx += p3reloc.count; + + header = ib_chunk->kdata[h_idx]; + crtc_id = ib_chunk->kdata[h_idx + 5]; + reg = ib_chunk->kdata[h_idx] >> 2; + mutex_lock(&p->rdev->ddev->mode_config.mutex); + obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_ERROR("cannot find crtc %d\n", crtc_id); + r = -EINVAL; + goto out; + } + crtc = obj_to_crtc(obj); + radeon_crtc = to_radeon_crtc(crtc); + crtc_id = radeon_crtc->crtc_id; + + if (!crtc->enabled) { + /* if the CRTC isn't enabled - we need to nop out the wait until */ + ib_chunk->kdata[h_idx + 2] = PACKET2(0); + ib_chunk->kdata[h_idx + 3] = PACKET2(0); + } else if (crtc_id == 1) { + switch (reg) { + case AVIVO_D1MODE_VLINE_START_END: + header &= R300_CP_PACKET0_REG_MASK; + header |= AVIVO_D2MODE_VLINE_START_END >> 2; + break; + case RADEON_CRTC_GUI_TRIG_VLINE: + header &= R300_CP_PACKET0_REG_MASK; + header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; + break; + default: + DRM_ERROR("unknown crtc reloc\n"); + r = -EINVAL; + goto out; + } + ib_chunk->kdata[h_idx] = header; + ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; + } +out: + mutex_unlock(&p->rdev->ddev->mode_config.mutex); + return r; +} + +/** * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 * @parser: parser structure holding parsing context. * @data: pointer to relocation data @@ -814,6 +980,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, unsigned idx; bool onereg; int r; + u32 tile_flags = 0; ib = p->ib->ptr; ib_chunk = &p->chunks[p->chunk_ib_idx]; @@ -825,6 +992,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { switch (reg) { + case RADEON_CRTC_GUI_TRIG_VLINE: + r = r100_cs_packet_parse_vline(p); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + break; /* FIXME: only allow PACKET3 blit? 
easier to check for out of * range access */ case RADEON_DST_PITCH_OFFSET: @@ -838,7 +1014,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case RADEON_RB3D_DEPTHOFFSET: case RADEON_RB3D_COLOROFFSET: @@ -869,6 +1058,40 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case R300_TX_OFFSET_0+52: case R300_TX_OFFSET_0+56: case R300_TX_OFFSET_0+60: + /* rn50 has no 3D engine so fail on any 3d setup */ + if (ASIC_IS_RN50(p->rdev)) { + DRM_ERROR("attempt to use RN50 3D engine failed\n"); + return -EINVAL; + } + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); + break; + case R300_RB3D_COLORPITCH0: + case RADEON_RB3D_COLORPITCH: + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + break; + case RADEON_RB3D_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", @@ -1256,29 +1479,100 @@ static void r100_vram_get_type(struct radeon_device *rdev) } } -void r100_vram_info(struct radeon_device *rdev) +static u32 r100_get_accessible_vram(struct radeon_device *rdev) { - r100_vram_get_type(rdev); + u32 aper_size; + u8 byte; + + aper_size = RREG32(RADEON_CONFIG_APER_SIZE); + + /* Set HDP_APER_CNTL only on cards that are known not to be broken, + * that is has the 2nd generation multifunction PCI interface + */ + if (rdev->family == CHIP_RV280 || + rdev->family >= CHIP_RV350) { + WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, + ~RADEON_HDP_APER_CNTL); + DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); + return aper_size * 2; + } + + /* Older cards have all sorts of funny issues to deal with. First + * check if it's a multifunction card by reading the PCI config + * header type... Limit those to one aperture size + */ + pci_read_config_byte(rdev->pdev, 0xe, &byte); + if (byte & 0x80) { + DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); + DRM_INFO("Limiting VRAM to one aperture\n"); + return aper_size; + } + + /* Single function older card. We read HDP_APER_CNTL to see how the BIOS + * have set it up. We don't write this as it's broken on some ASICs but + * we expect the BIOS to have done the right thing (might be too optimistic...) 
+ */ + if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) + return aper_size * 2; + return aper_size; +} + +void r100_vram_init_sizes(struct radeon_device *rdev) +{ + u64 config_aper_size; + u32 accessible; + + config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); if (rdev->flags & RADEON_IS_IGP) { uint32_t tom; /* read NB_TOM to get the amount of ram stolen for the GPU */ tom = RREG32(RADEON_NB_TOM); - rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); + rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); + /* for IGPs we need to keep VRAM where it was put by the BIOS */ + rdev->mc.vram_location = (tom & 0xffff) << 16; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } else { - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); /* Some production boards of m6 will report 0 * if it's 8 MB */ - if (rdev->mc.vram_size == 0) { - rdev->mc.vram_size = 8192 * 1024; - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); + if (rdev->mc.real_vram_size == 0) { + rdev->mc.real_vram_size = 8192 * 1024; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); } + /* let driver place VRAM */ + rdev->mc.vram_location = 0xFFFFFFFFUL; + /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - + * Novell bug 204882 + along with lots of ubuntu ones */ + if (config_aper_size > rdev->mc.real_vram_size) + rdev->mc.mc_vram_size = config_aper_size; + else + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } + /* work out accessible VRAM */ + accessible = r100_get_accessible_vram(rdev); + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + + if (accessible > rdev->mc.aper_size) + accessible = rdev->mc.aper_size; + + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.mc_vram_size = rdev->mc.aper_size; + + if (rdev->mc.real_vram_size > rdev->mc.aper_size) + rdev->mc.real_vram_size = rdev->mc.aper_size; +} + +void r100_vram_info(struct radeon_device *rdev) +{ + r100_vram_get_type(rdev); + + r100_vram_init_sizes(rdev); } @@ -1338,26 +1632,6 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) r100_pll_errata_after_data(rdev); } -uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) -{ - if (reg < 0x10000) - return readl(((void __iomem *)rdev->rmmio) + reg); - else { - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - } -} - -void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - if (reg < 0x10000) - writel(v, ((void __iomem *)rdev->rmmio) + reg); - else { - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - } -} - int r100_init(struct radeon_device *rdev) { return 0; @@ -1533,3 +1807,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } + +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size) +{ + int surf_index = reg * 16; + int flags = 0; + + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags = pitch / 16; + else + flags = pitch / 8; + + if (rdev->family <= CHIP_RS200) { + if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + flags |= 
RADEON_SURF_TILE_COLOR_BOTH; + if (tiling_flags & RADEON_TILING_MACRO) + flags |= RADEON_SURF_TILE_COLOR_MACRO; + } else if (rdev->family <= CHIP_RV280) { + if (tiling_flags & (RADEON_TILING_MACRO)) + flags |= R200_SURF_TILE_COLOR_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R200_SURF_TILE_COLOR_MICRO; + } else { + if (tiling_flags & RADEON_TILING_MACRO) + flags |= R300_SURF_TILE_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R300_SURF_TILE_MICRO; + } + + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); + WREG32(RADEON_SURFACE0_INFO + surf_index, flags); + WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); + WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); + return 0; +} + +void r100_clear_surface_reg(struct radeon_device *rdev, int reg) +{ + int surf_index = reg * 16; + WREG32(RADEON_SURFACE0_INFO + surf_index, 0); +} + +void r100_bandwidth_update(struct radeon_device *rdev) +{ + fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; + fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; + fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; + uint32_t temp, data, mem_trcd, mem_trp, mem_tras; + fixed20_12 memtcas_ff[8] = { + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(0), + fixed_init_half(1), + fixed_init_half(2), + fixed_init(0), + }; + fixed20_12 memtcas_rs480_ff[8] = { + fixed_init(0), + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(0), + fixed_init_half(1), + fixed_init_half(2), + fixed_init_half(3), + }; + fixed20_12 memtcas2_ff[8] = { + fixed_init(0), + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(4), + fixed_init(5), + fixed_init(6), + fixed_init(7), + }; + fixed20_12 memtrbs[8] = { + fixed_init(1), + fixed_init_half(1), + fixed_init(2), + fixed_init_half(2), + fixed_init(3), + fixed_init_half(3), + fixed_init(4), + fixed_init_half(4) + }; + fixed20_12 memtrbs_r4xx[8] = { + fixed_init(4), + fixed_init(5), + fixed_init(6), + fixed_init(7), + fixed_init(8), + fixed_init(9), + fixed_init(10), + fixed_init(11) + }; + fixed20_12 min_mem_eff; + fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; + fixed20_12 cur_latency_mclk, cur_latency_sclk; + fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, + disp_drain_rate2, read_return_rate; + fixed20_12 time_disp1_drop_priority; + int c; + int cur_size = 16; /* in octawords */ + int critical_point = 0, critical_point2; +/* uint32_t read_return_rate, time_disp1_drop_priority; */ + int stop_req, max_stop_req; + struct drm_display_mode *mode1 = NULL; + struct drm_display_mode *mode2 = NULL; + uint32_t pixel_bytes1 = 0; + uint32_t pixel_bytes2 = 0; + + if (rdev->mode_info.crtcs[0]->base.enabled) { + mode1 = &rdev->mode_info.crtcs[0]->base.mode; + pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; + } + if (rdev->mode_info.crtcs[1]->base.enabled) { + mode2 = &rdev->mode_info.crtcs[1]->base.mode; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + } + + min_mem_eff.full = rfixed_const_8(0); + /* get modes */ + if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { + uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); + mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); + mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); + /* check crtc enables */ + if (mode2) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); + if 
(mode1) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); + WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); + } + + /* + * determine is there is enough bw for current mode + */ + mclk_ff.full = rfixed_const(rdev->clock.default_mclk); + temp_ff.full = rfixed_const(100); + mclk_ff.full = rfixed_div(mclk_ff, temp_ff); + sclk_ff.full = rfixed_const(rdev->clock.default_sclk); + sclk_ff.full = rfixed_div(sclk_ff, temp_ff); + + temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); + temp_ff.full = rfixed_const(temp); + mem_bw.full = rfixed_mul(mclk_ff, temp_ff); + + pix_clk.full = 0; + pix_clk2.full = 0; + peak_disp_bw.full = 0; + if (mode1) { + temp_ff.full = rfixed_const(1000); + pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ + pix_clk.full = rfixed_div(pix_clk, temp_ff); + temp_ff.full = rfixed_const(pixel_bytes1); + peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); + } + if (mode2) { + temp_ff.full = rfixed_const(1000); + pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ + pix_clk2.full = rfixed_div(pix_clk2, temp_ff); + temp_ff.full = rfixed_const(pixel_bytes2); + peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); + } + + mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); + if (peak_disp_bw.full >= mem_bw.full) { + DRM_ERROR("You may not have enough display bandwidth for current mode\n" + "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); + } + + /* Get values from the EXT_MEM_CNTL register...converting its contents. */ + temp = RREG32(RADEON_MEM_TIMING_CNTL); + if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ + mem_trcd = ((temp >> 2) & 0x3) + 1; + mem_trp = ((temp & 0x3)) + 1; + mem_tras = ((temp & 0x70) >> 4) + 1; + } else if (rdev->family == CHIP_R300 || + rdev->family == CHIP_R350) { /* r300, r350 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 11) & 0xf) + 4; + } else if (rdev->family == CHIP_RV350 || + rdev->family <= CHIP_RV380) { + /* rv3x0 */ + mem_trcd = (temp & 0x7) + 3; + mem_trp = ((temp >> 8) & 0x7) + 3; + mem_tras = ((temp >> 11) & 0xf) + 6; + } else if (rdev->family == CHIP_R420 || + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) { + /* r4xx */ + mem_trcd = (temp & 0xf) + 3; + if (mem_trcd > 15) + mem_trcd = 15; + mem_trp = ((temp >> 8) & 0xf) + 3; + if (mem_trp > 15) + mem_trp = 15; + mem_tras = ((temp >> 12) & 0x1f) + 6; + if (mem_tras > 31) + mem_tras = 31; + } else { /* RV200, R200 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 12) & 0xf) + 4; + } + /* convert to FF */ + trcd_ff.full = rfixed_const(mem_trcd); + trp_ff.full = rfixed_const(mem_trp); + tras_ff.full = rfixed_const(mem_tras); + + /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ + temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); + data = (temp & (7 << 20)) >> 20; + if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { + if (rdev->family == CHIP_RS480) /* don't think rs400 */ + tcas_ff = memtcas_rs480_ff[data]; + else + tcas_ff = memtcas_ff[data]; + } else + tcas_ff = memtcas2_ff[data]; + + if (rdev->family == CHIP_RS400 || + rdev->family == CHIP_RS480) { + /* extra cas latency stored in bits 23-25 0-4 clocks */ + data = (temp >> 23) & 0x7; + if (data < 5) + tcas_ff.full += rfixed_const(data); + } + + if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { + /* on the R300, Tcas is included in Trbs. 
+ */ + temp = RREG32(RADEON_MEM_CNTL); + data = (R300_MEM_NUM_CHANNELS_MASK & temp); + if (data == 1) { + if (R300_MEM_USE_CD_CH_ONLY & temp) { + temp = RREG32(R300_MC_IND_INDEX); + temp &= ~R300_MC_IND_ADDR_MASK; + temp |= R300_MC_READ_CNTL_CD_mcind; + WREG32(R300_MC_IND_INDEX, temp); + temp = RREG32(R300_MC_IND_DATA); + data = (R300_MEM_RBS_POSITION_C_MASK & temp); + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + if (rdev->family == CHIP_RV410 || + rdev->family == CHIP_R420 || + rdev->family == CHIP_R423) + trbs_ff = memtrbs_r4xx[data]; + else + trbs_ff = memtrbs[data]; + tcas_ff.full += trbs_ff.full; + } + + sclk_eff_ff.full = sclk_ff.full; + + if (rdev->flags & RADEON_IS_AGP) { + fixed20_12 agpmode_ff; + agpmode_ff.full = rfixed_const(radeon_agpmode); + temp_ff.full = rfixed_const_666(16); + sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); + } + /* TODO PCIE lanes may affect this - agpmode == 16?? */ + + if (ASIC_IS_R300(rdev)) { + sclk_delay_ff.full = rfixed_const(250); + } else { + if ((rdev->family == CHIP_RV100) || + rdev->flags & RADEON_IS_IGP) { + if (rdev->mc.vram_is_ddr) + sclk_delay_ff.full = rfixed_const(41); + else + sclk_delay_ff.full = rfixed_const(33); + } else { + if (rdev->mc.vram_width == 128) + sclk_delay_ff.full = rfixed_const(57); + else + sclk_delay_ff.full = rfixed_const(41); + } + } + + mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); + + if (rdev->mc.vram_is_ddr) { + if (rdev->mc.vram_width == 32) { + k1.full = rfixed_const(40); + c = 3; + } else { + k1.full = rfixed_const(20); + c = 1; + } + } else { + k1.full = rfixed_const(40); + c = 3; + } + + temp_ff.full = rfixed_const(2); + mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); + temp_ff.full = rfixed_const(c); + mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); + temp_ff.full = rfixed_const(4); + mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); + mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); + mc_latency_mclk.full += k1.full; + + mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); + mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); + + /* + HW cursor time assuming worst case of full size colour cursor. + */ + temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); + temp_ff.full += trcd_ff.full; + if (temp_ff.full < tras_ff.full) + temp_ff.full = tras_ff.full; + cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); + + temp_ff.full = rfixed_const(cur_size); + cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); + /* + Find the total latency for the display data. + */ + disp_latency_overhead.full = rfixed_const(80); + disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); + mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; + mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; + + if (mc_latency_mclk.full > mc_latency_sclk.full) + disp_latency.full = mc_latency_mclk.full; + else + disp_latency.full = mc_latency_sclk.full; + + /* setup Max GRPH_STOP_REQ default value */ + if (ASIC_IS_RV100(rdev)) + max_stop_req = 0x5c; + else + max_stop_req = 0x7c; + + if (mode1) { + /* CRTC1 + Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 
+ GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] + */ + stop_req = mode1->hdisplay * pixel_bytes1 / 16; + + if (stop_req > max_stop_req) + stop_req = max_stop_req; + + /* + Find the drain rate of the display buffer. + */ + temp_ff.full = rfixed_const((16/pixel_bytes1)); + disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); + + /* + Find the critical point of the display buffer. + */ + crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); + crit_point_ff.full += rfixed_const_half(0); + + critical_point = rfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point = 0; + } + + /* + The critical point should never be above max_stop_req-4. Setting + GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. + */ + if (max_stop_req - critical_point < 4) + critical_point = 0; + + if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ + critical_point = 0x10; + } + + temp = RREG32(RADEON_GRPH_BUFFER_CNTL); + temp &= ~(RADEON_GRPH_STOP_REQ_MASK); + temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + temp &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; + } + temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + temp |= RADEON_GRPH_BUFFER_SIZE; + temp &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + /* + Write the result into the register. + */ + WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); + +#if 0 + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { + /* attempt to program RS400 disp regs correctly ??? */ + temp = RREG32(RS400_DISP1_REG_CNTL); + temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | + RS400_DISP1_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP1_REQ_CNTL1, (temp | + (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DMIF_MEM_CNTL1); + temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | + RS400_DISP1_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DMIF_MEM_CNTL1, (temp | + (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | + (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); + } +#endif + + DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", + /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ + (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); + } + + if (mode2) { + u32 grph2_cntl; + stop_req = mode2->hdisplay * pixel_bytes2 / 16; + + if (stop_req > max_stop_req) + stop_req = max_stop_req; + + /* + Find the drain rate of the display buffer. 
+ */ + temp_ff.full = rfixed_const((16/pixel_bytes2)); + disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); + + grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); + grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); + grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; + } + grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; + grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + + if ((rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) + critical_point2 = 0; + else { + temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; + temp_ff.full = rfixed_const(temp); + temp_ff.full = rfixed_mul(mclk_ff, temp_ff); + if (sclk_ff.full < temp_ff.full) + temp_ff.full = sclk_ff.full; + + read_return_rate.full = temp_ff.full; + + if (mode1) { + temp_ff.full = read_return_rate.full - disp_drain_rate.full; + time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); + } else { + time_disp1_drop_priority.full = 0; + } + crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; + crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); + crit_point_ff.full += rfixed_const_half(0); + + critical_point2 = rfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point2 = 0; + } + + if (max_stop_req - critical_point2 < 4) + critical_point2 = 0; + + } + + if (critical_point2 == 0 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0 */ + critical_point2 = 0x10; + } + + WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); + + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { +#if 0 + /* attempt to program RS400 disp2 regs correctly ??? 
*/ + temp = RREG32(RS400_DISP2_REQ_CNTL1); + temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | + RS400_DISP2_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP2_REQ_CNTL1, (temp | + (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DISP2_REQ_CNTL2); + temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | + RS400_DISP2_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DISP2_REQ_CNTL2, (temp | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); +#endif + WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); + WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); + WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); + WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); + } + + DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", + (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); + } +} diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index e2ed5bc0817..051bca6e3a4 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -30,6 +30,8 @@ #include "drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_drm.h" +#include "radeon_share.h" /* r300,r350,rv350,rv370,rv380 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev); int r100_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx); +int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); int r100_cs_parse_packet0(struct radeon_cs_parser *p, @@ -80,8 +83,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); - mb(); } + mb(); } int rv370_pcie_gart_enable(struct radeon_device *rdev) @@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; - writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); + addr = (lower_32_bits(addr) >> 8) | + ((upper_32_bits(addr) & 0xff) << 24) | + 0xc; + /* on x86 we want this to be CPU endian, on powerpc + * on powerpc without HW swappers, it'll get swapped on way + * into VRAM - so no need for cpu_to_le32 on VRAM tables */ + writel(addr, ((void __iomem *)ptr) + (i * 4)); return 0; } @@ -440,6 +448,7 @@ void r300_gpu_init(struct radeon_device *rdev) /* rv350,rv370,rv380 */ rdev->num_gb_pipes = 1; } + rdev->num_z_pipes = 1; gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); switch (rdev->num_gb_pipes) { case 2: @@ -478,7 +487,8 @@ void r300_gpu_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. 
Bad things might happen.\n"); } - DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); + DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n", + rdev->num_gb_pipes, rdev->num_z_pipes); } int r300_ga_reset(struct radeon_device *rdev) @@ -579,35 +589,12 @@ void r300_vram_info(struct radeon_device *rdev) } else { rdev->mc.vram_width = 64; } - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); } /* - * Indirect registers accessor - */ -uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) -{ - uint32_t r; - - WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff)); - (void)RREG32(RADEON_PCIE_INDEX); - r = RREG32(RADEON_PCIE_DATA); - return r; -} - -void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff)); - (void)RREG32(RADEON_PCIE_INDEX); - WREG32(RADEON_PCIE_DATA, (v)); - (void)RREG32(RADEON_PCIE_DATA); -} - -/* * PCIE Lanes */ @@ -970,7 +957,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track) static const unsigned r300_reg_safe_bm[159] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -1008,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0003FC01, 0xFFFFFFF8, 0xFE800B19, + 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, }; static int r300_packet0_check(struct radeon_cs_parser *p, @@ -1019,7 +1006,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_reloc *reloc; struct r300_cs_track *track; volatile uint32_t *ib; - uint32_t tmp; + uint32_t tmp, tile_flags = 0; unsigned i; int r; @@ -1027,6 +1014,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ib_chunk = &p->chunks[p->chunk_ib_idx]; track = (struct r300_cs_track*)p->track; switch(reg) { + case AVIVO_D1MODE_VLINE_START_END: + case RADEON_CRTC_GUI_TRIG_VLINE: + r = r100_cs_packet_parse_vline(p); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + break; case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET: r = r100_cs_packet_next_reloc(p, &reloc); @@ -1038,7 +1035,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case R300_RB3D_COLOROFFSET0: case R300_RB3D_COLOROFFSET1: @@ -1127,6 +1136,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + 
idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + i = (reg - 0x4E38) >> 2; track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { @@ -1182,6 +1208,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4F24: /* ZB_DEPTHPITCH */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_DEPTHMACROTILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_DEPTHMICROTILE_TILED;; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; break; case 0x4104: @@ -1341,6 +1384,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; track->textures[i].txdepth = tmp; break; + case R300_ZB_ZPASS_ADDR: + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); + break; + case 0x4be8: + /* valid register only on RV530 */ + if (p->rdev->family == CHIP_RV530) + break; + /* fallthrough do not move */ default: printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx); diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515..4b7afef35a6 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -27,7 +27,9 @@ #ifndef _R300_REG_H_ #define _R300_REG_H_ - +#define R300_SURF_TILE_MACRO (1<<16) +#define R300_SURF_TILE_MICRO (2<<16) +#define R300_SURF_TILE_BOTH (3<<16) #define R300_MC_INIT_MISC_LAT_TIMER 0x180 diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index dea497a979f..97426a6f370 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -165,7 +165,18 @@ void r420_pipes_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. 
Bad things might happen.\n"); } - DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); + + if (rdev->family == CHIP_RV530) { + tmp = RREG32(RV530_GB_PIPE_SELECT2); + if ((tmp & 3) == 3) + rdev->num_z_pipes = 2; + else + rdev->num_z_pipes = 1; + } else + rdev->num_z_pipes = 1; + + DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n", + rdev->num_gb_pipes, rdev->num_z_pipes); } void r420_gpu_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 9070a1c2ce2..e1d5e0331e1 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -350,6 +350,7 @@ #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c +#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 /* master controls */ @@ -438,13 +439,15 @@ # define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 # define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff -#define R500_DxMODE_INT_MASK 0x6540 -#define R500_D1MODE_INT_MASK (1<<0) -#define R500_D2MODE_INT_MASK (1<<8) - #define AVIVO_D1MODE_DATA_FORMAT 0x6528 # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C +#define AVIVO_D1MODE_VBLANK_STATUS 0x6534 +# define AVIVO_VBLANK_ACK (1 << 4) +#define AVIVO_D1MODE_VLINE_START_END 0x6538 +#define AVIVO_DxMODE_INT_MASK 0x6540 +# define AVIVO_D1MODE_INT_MASK (1 << 0) +# define AVIVO_D2MODE_INT_MASK (1 << 8) #define AVIVO_D1MODE_VIEWPORT_START 0x6580 #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 @@ -474,6 +477,7 @@ #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c +#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 #define AVIVO_D2GRPH_ENABLE 0x6900 @@ -496,6 +500,8 @@ #define AVIVO_D2CUR_SIZE 0x6c10 #define AVIVO_D2CUR_POSITION 0x6c14 +#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 +#define AVIVO_D2MODE_VLINE_START_END 0x6d38 #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 @@ -746,4 +752,8 @@ # define AVIVO_I2C_EN (1 << 0) # define AVIVO_I2C_RESET (1 << 8) +#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc +# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) +# define AVIVO_D2_VBLANK_INTERRUPT (1 << 5) + #endif diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 570a244bd88..ebd6b0f7bdf 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_share.h" /* r520,rv530,rv560,rv570,r580 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev) "programming pipes. 
Bad things might happen.\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(R520_MC_FB_LOCATION, tmp); @@ -176,7 +177,6 @@ void r520_gpu_init(struct radeon_device *rdev) */ /* workaround for RV530 */ if (rdev->family == CHIP_RV530) { - WREG32(0x4124, 1); WREG32(0x4128, 0xFF); } r420_pipes_init(rdev); @@ -226,9 +226,20 @@ static void r520_vram_get_type(struct radeon_device *rdev) void r520_vram_info(struct radeon_device *rdev) { + fixed20_12 a; + r520_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); +} + +void r520_bandwidth_update(struct radeon_device *rdev) +{ + rv515_bandwidth_avivo_update(rdev); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c45559fc97f..538cd907df6 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); WREG32(R600_MC_VM_FB_LOCATION, tmp); @@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev) void r600_vram_info(struct radeon_device *rdev) { r600_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; /* Could aper size report 0 ? 
*/ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 146f3570af8..20f17908b03 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -384,8 +384,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) DRM_INFO("Loading RV670 PFP Microcode\n"); for (i = 0; i < PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); - } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { - DRM_INFO("Loading RS780 CP Microcode\n"); + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { + DRM_INFO("Loading RS780/RS880 CP Microcode\n"); for (i = 0; i < PM4_UCODE_SIZE; i++) { RADEON_WRITE(R600_CP_ME_RAM_DATA, RS780_cp_microcode[i][0]); @@ -396,7 +397,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) } RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); - DRM_INFO("Loading RS780 PFP Microcode\n"); + DRM_INFO("Loading RS780/RS880 PFP Microcode\n"); for (i = 0; i < PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); } @@ -783,6 +784,7 @@ static void r600_gfx_init(struct drm_device *dev, break; case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: dev_priv->r600_max_pipes = 1; dev_priv->r600_max_tile_pipes = 1; @@ -917,7 +919,8 @@ static void r600_gfx_init(struct drm_device *dev, ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); else RADEON_WRITE(R600_DB_DEBUG, 0); @@ -935,7 +938,8 @@ static void r600_gfx_init(struct drm_device *dev, sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | R600_FETCH_FIFO_HIWATER(0xa) | R600_DONE_FIFO_HIWATER(0xe0) | @@ -978,7 +982,8 @@ static void r600_gfx_init(struct drm_device *dev, R600_NUM_ES_STACK_ENTRIES(0)); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { /* no vertex cache */ sq_config &= ~R600_VC_ENABLE; @@ -1035,7 +1040,8 @@ static void r600_gfx_init(struct drm_device *dev, if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); else RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); @@ -1078,6 +1084,7 @@ static void r600_gfx_init(struct drm_device *dev, 
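Note: the r600_cp.c hunks above add CHIP_RS880 to each (dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780 comparison so that RS880 IGPs take the same microcode and setup paths as RS780. As a rough illustration only (the helper below is hypothetical and not part of this patch), such repeated family checks are often factored into a single predicate:

	/*
	 * Hypothetical helper, not in the patch: true for the RS780/RS880
	 * IGP family that r600_cp.c treats identically.
	 */
	static inline int r600_is_rs78x_family(drm_radeon_private_t *dev_priv)
	{
		int family = dev_priv->flags & RADEON_FAMILY_MASK;

		return (family == CHIP_RS780) || (family == CHIP_RS880);
	}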
break; case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: gs_prim_buffer_depth = 32; break; @@ -1123,6 +1130,7 @@ static void r600_gfx_init(struct drm_device *dev, switch (dev_priv->flags & RADEON_FAMILY_MASK) { case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: tc_cntl = R600_TC_L2_SIZE(8); break; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d61f2fc61df..b519fb2fecb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -64,6 +64,7 @@ extern int radeon_agpmode; extern int radeon_vram_limit; extern int radeon_gart_size; extern int radeon_benchmarking; +extern int radeon_testing; extern int radeon_connector_table; /* @@ -113,6 +114,7 @@ enum radeon_family { CHIP_RV770, CHIP_RV730, CHIP_RV710, + CHIP_RS880, CHIP_LAST, }; @@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); +/* + * Tiling registers + */ +struct radeon_surface_reg { + struct radeon_object *robj; +}; + +#define RADEON_GEM_MAX_SURFACES 8 /* * Radeon buffer. @@ -213,6 +223,7 @@ struct radeon_object_list { uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; + uint32_t tiling_flags; }; int radeon_object_init(struct radeon_device *rdev); @@ -231,6 +242,7 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, uint64_t *gpu_addr); void radeon_object_unpin(struct radeon_object *robj); int radeon_object_wait(struct radeon_object *robj); +int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); int radeon_object_evict_vram(struct radeon_device *rdev); int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); void radeon_object_force_delete(struct radeon_device *rdev); @@ -242,8 +254,15 @@ void radeon_object_list_clean(struct list_head *head); int radeon_object_fbdev_mmap(struct radeon_object *robj, struct vm_area_struct *vma); unsigned long radeon_object_size(struct radeon_object *robj); - - +void radeon_object_clear_surface_reg(struct radeon_object *robj); +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop); +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch); +void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); /* * GEM objects. 
*/ @@ -315,8 +334,11 @@ struct radeon_mc { unsigned gtt_location; unsigned gtt_size; unsigned vram_location; - unsigned vram_size; + /* for some chips with <= 32MB we need to lie + * about vram size near mc fb location */ + unsigned mc_vram_size; unsigned vram_width; + unsigned real_vram_size; int vram_mtrr; bool vram_is_ddr; }; @@ -474,6 +496,39 @@ struct radeon_wb { uint64_t gpu_addr; }; +/** + * struct radeon_pm - power management datas + * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) + * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) + * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) + * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) + * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) + * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) + * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) + * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) + * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) + * @sclk: GPU clock Mhz (core bandwith depends of this clock) + * @needed_bandwidth: current bandwidth needs + * + * It keeps track of various data needed to take powermanagement decision. + * Bandwith need is used to determine minimun clock of the GPU and memory. + * Equation between gpu/memory clock and available bandwidth is hw dependent + * (type of memory, bus size, efficiency, ...) + */ +struct radeon_pm { + fixed20_12 max_bandwidth; + fixed20_12 igp_sideport_mclk; + fixed20_12 igp_system_mclk; + fixed20_12 igp_ht_link_clk; + fixed20_12 igp_ht_link_width; + fixed20_12 k8_bandwidth; + fixed20_12 sideport_bandwidth; + fixed20_12 ht_bandwidth; + fixed20_12 core_bandwidth; + fixed20_12 sclk; + fixed20_12 needed_bandwidth; +}; + /* * Benchmarking @@ -482,6 +537,12 @@ void radeon_benchmark(struct radeon_device *rdev); /* + * Testing + */ +void radeon_test_moves(struct radeon_device *rdev); + + +/* * Debugfs */ int radeon_debugfs_add_files(struct radeon_device *rdev, @@ -514,6 +575,7 @@ struct radeon_asic { void (*ring_start)(struct radeon_device *rdev); int (*irq_set)(struct radeon_device *rdev); int (*irq_process)(struct radeon_device *rdev); + u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); int (*cs_parse)(struct radeon_cs_parser *p); int (*copy_blit)(struct radeon_device *rdev, @@ -535,6 +597,11 @@ struct radeon_asic { void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); void (*set_clock_gating)(struct radeon_device *rdev, int enable); + int (*set_surface_reg)(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); + int (*clear_surface_reg)(struct radeon_device *rdev, int reg); + void (*bandwidth_update)(struct radeon_device *rdev); }; union radeon_asic_config { @@ -566,6 +633,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); /* @@ -584,6 +655,7 @@ struct radeon_device { int usec_timeout; enum radeon_pll_errata pll_errata; int num_gb_pipes; + int 
num_z_pipes; int disp_priority; /* BIOS */ uint8_t *bios; @@ -594,17 +666,14 @@ struct radeon_device { struct radeon_object *fbdev_robj; struct radeon_framebuffer *fbdev_rfb; /* Register mmio */ - unsigned long rmmio_base; - unsigned long rmmio_size; + resource_size_t rmmio_base; + resource_size_t rmmio_size; void *rmmio; - radeon_rreg_t mm_rreg; - radeon_wreg_t mm_wreg; radeon_rreg_t mc_rreg; radeon_wreg_t mc_wreg; radeon_rreg_t pll_rreg; radeon_wreg_t pll_wreg; - radeon_rreg_t pcie_rreg; - radeon_wreg_t pcie_wreg; + uint32_t pcie_reg_mask; radeon_rreg_t pciep_rreg; radeon_wreg_t pciep_wreg; struct radeon_clock clock; @@ -619,11 +688,14 @@ struct radeon_device { struct radeon_irq irq; struct radeon_asic *asic; struct radeon_gem gem; + struct radeon_pm pm; struct mutex cs_mutex; struct radeon_wb wb; bool gpu_lockup; bool shutdown; bool suspend; + bool need_dma32; + struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; }; int radeon_device_init(struct radeon_device *rdev, @@ -633,22 +705,42 @@ int radeon_device_init(struct radeon_device *rdev, void radeon_device_fini(struct radeon_device *rdev); int radeon_gpu_wait_for_idle(struct radeon_device *rdev); +static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) +{ + if (reg < 0x10000) + return readl(((void __iomem *)rdev->rmmio) + reg); + else { + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); + return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); + } +} + +static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) +{ + if (reg < 0x10000) + writel(v, ((void __iomem *)rdev->rmmio) + reg); + else { + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); + writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); + } +} + /* * Registers read & write functions. */ #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) -#define RREG32(reg) rdev->mm_rreg(rdev, (reg)) -#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v)) +#define RREG32(reg) r100_mm_rreg(rdev, (reg)) +#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) -#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg)) -#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v)) +#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) +#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) #define WREG32_P(reg, val, mask) \ do { \ uint32_t tmp_ = RREG32(reg); \ @@ -664,12 +756,32 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev); WREG32_PLL(reg, tmp_); \ } while (0) +/* + * Indirect registers accessor + */ +static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) +{ + uint32_t r; + + WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); + r = RREG32(RADEON_PCIE_DATA); + return r; +} + +static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) +{ + WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); + WREG32(RADEON_PCIE_DATA, (v)); +} + void r100_pll_errata_after_index(struct radeon_device *rdev); /* * ASICs helpers. 
*/ +#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ + (rdev->pdev->device == 0x5969)) #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ (rdev->family == CHIP_RV200) || \ (rdev->family == CHIP_RS100) || \ @@ -788,6 +900,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) +#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) #define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) @@ -796,5 +909,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) +#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) +#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) +#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df..93d8f888930 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -49,6 +49,7 @@ void r100_vram_info(struct radeon_device *rdev); int r100_gpu_reset(struct radeon_device *rdev); int r100_mc_init(struct radeon_device *rdev); void r100_mc_fini(struct radeon_device *rdev); +u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); int r100_wb_init(struct radeon_device *rdev); void r100_wb_fini(struct radeon_device *rdev); int r100_gart_enable(struct radeon_device *rdev); @@ -71,6 +72,11 @@ int r100_copy_blit(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); +int r100_clear_surface_reg(struct radeon_device *rdev, int reg); +void r100_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic r100_asic = { .init = &r100_init, @@ -91,6 +97,7 @@ static struct radeon_asic r100_asic = { .ring_start = &r100_ring_start, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, .fence_ring_emit = &r100_fence_ring_emit, .cs_parse = &r100_cs_parse, .copy_blit = &r100_copy_blit, @@ -100,6 +107,9 @@ static struct radeon_asic r100_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; @@ -128,6 +138,7 @@ int r300_copy_dma(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); + static struct radeon_asic r300_asic = { .init = &r300_init, .errata = &r300_errata, @@ -147,6 +158,7 @@ static struct radeon_asic r300_asic = { .ring_start = &r300_ring_start, 
.irq_set = &r100_irq_set, .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -156,6 +168,9 @@ static struct radeon_asic r300_asic = { .set_memory_clock = NULL, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; /* @@ -184,6 +199,7 @@ static struct radeon_asic r420_asic = { .ring_start = &r300_ring_start, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -193,6 +209,9 @@ static struct radeon_asic r420_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; @@ -228,6 +247,7 @@ static struct radeon_asic rs400_asic = { .ring_start = &r300_ring_start, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -237,25 +257,32 @@ static struct radeon_asic rs400_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; /* * rs600. 
*/ +int rs600_init(struct radeon_device *dev); void rs600_errata(struct radeon_device *rdev); void rs600_vram_info(struct radeon_device *rdev); int rs600_mc_init(struct radeon_device *rdev); void rs600_mc_fini(struct radeon_device *rdev); int rs600_irq_set(struct radeon_device *rdev); +int rs600_irq_process(struct radeon_device *rdev); +u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); int rs600_gart_enable(struct radeon_device *rdev); void rs600_gart_disable(struct radeon_device *rdev); void rs600_gart_tlb_flush(struct radeon_device *rdev); int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rs600_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs600_asic = { - .init = &r300_init, + .init = &rs600_init, .errata = &rs600_errata, .vram_info = &rs600_vram_info, .gpu_reset = &r300_gpu_reset, @@ -272,7 +299,8 @@ static struct radeon_asic rs600_asic = { .cp_disable = &r100_cp_disable, .ring_start = &r300_ring_start, .irq_set = &rs600_irq_set, - .irq_process = &r100_irq_process, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -282,6 +310,7 @@ static struct radeon_asic rs600_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .bandwidth_update = &rs600_bandwidth_update, }; @@ -294,8 +323,9 @@ int rs690_mc_init(struct radeon_device *rdev); void rs690_mc_fini(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rs690_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs690_asic = { - .init = &r300_init, + .init = &rs600_init, .errata = &rs690_errata, .vram_info = &rs690_vram_info, .gpu_reset = &r300_gpu_reset, @@ -312,7 +342,8 @@ static struct radeon_asic rs690_asic = { .cp_disable = &r100_cp_disable, .ring_start = &r300_ring_start, .irq_set = &rs600_irq_set, - .irq_process = &r100_irq_process, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -322,6 +353,9 @@ static struct radeon_asic rs690_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rs690_bandwidth_update, }; @@ -339,6 +373,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_ring_start(struct radeon_device *rdev); uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rv515_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rv515_asic = { .init = &rv515_init, .errata = &rv515_errata, @@ -356,8 +391,9 @@ static struct radeon_asic rv515_asic = { .cp_fini = &r100_cp_fini, .cp_disable = &r100_cp_disable, .ring_start = &rv515_ring_start, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = 
&rs600_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -367,6 +403,9 @@ static struct radeon_asic rv515_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, }; @@ -377,6 +416,7 @@ void r520_errata(struct radeon_device *rdev); void r520_vram_info(struct radeon_device *rdev); int r520_mc_init(struct radeon_device *rdev); void r520_mc_fini(struct radeon_device *rdev); +void r520_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic r520_asic = { .init = &rv515_init, .errata = &r520_errata, @@ -394,8 +434,9 @@ static struct radeon_asic r520_asic = { .cp_fini = &r100_cp_fini, .cp_disable = &r100_cp_disable, .ring_start = &rv515_ring_start, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, @@ -405,6 +446,9 @@ static struct radeon_asic r520_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r520_bandwidth_update, }; /* diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f5a1a49098..fcfe5c02d74 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, - struct radeon_i2c_bus_rec *i2c_bus) + struct radeon_i2c_bus_rec *i2c_bus, + uint8_t *line_mux) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ @@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, if ((dev->pdev->device == 0x5653) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x0291)) { - if (*connector_type == DRM_MODE_CONNECTOR_LVDS) + if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { i2c_bus->valid = false; + *line_mux = 53; + } } /* Funky macbooks */ @@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct if (!radeon_atom_apply_quirks (dev, (1 << i), &bios_connectors[i].connector_type, - &bios_connectors[i].ddc_bus)) + &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) continue; bios_connectors[i].valid = true; diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c44403a2ca7..2e938f7496f 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); + r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); if (r) { goto out_cleanup; } @@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, 
fence); + r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index afc4db280b9..2a027e00762 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -685,23 +685,15 @@ static const uint32_t default_tvdac_adj[CHIP_LAST] = { 0x00780000, /* rs480 */ }; -static struct radeon_encoder_tv_dac - *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev) +static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev, + struct radeon_encoder_tv_dac *tv_dac) { - struct radeon_encoder_tv_dac *tv_dac = NULL; - - tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); - - if (!tv_dac) - return NULL; - tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) tv_dac->ps2_tvdac_adj = 0x00880000; tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; - - return tv_dac; + return; } struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct @@ -713,19 +705,18 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct uint16_t dac_info; uint8_t rev, bg, dac; struct radeon_encoder_tv_dac *tv_dac = NULL; + int found = 0; + + tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); + if (!tv_dac) + return NULL; if (rdev->bios == NULL) - return radeon_legacy_get_tv_dac_info_from_table(rdev); + goto out; /* first check TV table */ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (dac_info) { - tv_dac = - kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); - - if (!tv_dac) - return NULL; - rev = RBIOS8(dac_info + 0x3); if (rev > 4) { bg = RBIOS8(dac_info + 0xc) & 0xf; @@ -739,6 +730,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0x10) & 0xf; dac = RBIOS8(dac_info + 0x11) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); + found = 1; } else if (rev > 1) { bg = RBIOS8(dac_info + 0xc) & 0xf; dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; @@ -751,22 +743,15 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0xe) & 0xf; dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); + found = 1; } - tv_dac->tv_std = radeon_combios_get_tv_info(encoder); - - } else { + } + if (!found) { /* then check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { - tv_dac = - kzalloc(sizeof(struct radeon_encoder_tv_dac), - GFP_KERNEL); - - if (!tv_dac) - return NULL; - rev = RBIOS8(dac_info) & 0x3; if (rev < 2) { bg = RBIOS8(dac_info + 0x3) & 0xf; @@ -775,6 +760,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; + found = 1; } else { bg = RBIOS8(dac_info + 0x4) & 0xf; dac = RBIOS8(dac_info + 0x5) & 0xf; @@ -782,13 +768,17 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; + found = 1; } } else { DRM_INFO("No TV DAC info found in BIOS\n"); - return radeon_legacy_get_tv_dac_info_from_table(rdev); } } +out: + if (!found) /* fallback to defaults */ + radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); + 
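Note: the radeon_combios.c hunk above restructures radeon_combios_get_tv_dac_info() to allocate the tv_dac struct once, record in a found flag whether the BIOS TV or CRT tables supplied adjustment values, and otherwise fall back to the per-family defaults table at the out label. A condensed sketch of that control flow, assuming the same semantics (parse_tv_dac_tables() is a hypothetical stand-in for the table parsing shown above):

	/*
	 * Sketch only: mirrors the allocate-once / flag-and-fallback shape of
	 * the reworked lookup; not the actual driver function.
	 */
	struct radeon_encoder_tv_dac *tv_dac_info(struct radeon_device *rdev)
	{
		struct radeon_encoder_tv_dac *tv_dac;
		int found = 0;

		tv_dac = kzalloc(sizeof(*tv_dac), GFP_KERNEL);
		if (!tv_dac)
			return NULL;
		if (rdev->bios)
			found = parse_tv_dac_tables(rdev, tv_dac);
		if (!found)	/* fall back to per-family defaults */
			radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
		return tv_dac;
	}

Allocating up front and funnelling every path through the single fallback is what lets the patch drop the duplicated kzalloc/NULL checks that previously appeared in both the TV-table and CRT-table branches.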
return tv_dac; } diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index d8356827ef1..7a52c461145 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -406,6 +406,15 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) { uint32_t gb_tile_config, gb_pipe_sel = 0; + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { + uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2); + if ((z_pipe_sel & 3) == 3) + dev_priv->num_z_pipes = 2; + else + dev_priv->num_z_pipes = 1; + } else + dev_priv->num_z_pipes = 1; + /* RS4xx/RS6xx/R4xx/R5xx */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index b843f9bdfb1..a169067efc4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) sizeof(struct drm_radeon_cs_chunk))) { return -EFAULT; } + p->chunks[i].length_dw = user_chunk.length_dw; + p->chunks[i].kdata = NULL; p->chunks[i].chunk_id = user_chunk.chunk_id; + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { p->chunk_relocs_idx = i; } if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { p->chunk_ib_idx = i; + /* zero length IB isn't useful */ + if (p->chunks[i].length_dw == 0) + return -EINVAL; } + p->chunks[i].length_dw = user_chunk.length_dw; cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; - p->chunks[i].kdata = NULL; size = p->chunks[i].length_dw * sizeof(uint32_t); p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); if (p->chunks[i].kdata == NULL) { diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5232441f119..b13c79e38bc 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, if (ASIC_IS_AVIVO(rdev)) WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); - else + else { + radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); + } } int radeon_crtc_cursor_set(struct drm_crtc *crtc, @@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, (RADEON_CUR_LOCK | ((xorigin ? 0 : x) << 16) | (yorigin ? 0 : y))); + /* offset is from DISP(2)_BASE_ADDRESS */ + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + + (yorigin * 256))); } radeon_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f30aa7274a5..7693f7c67bd 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -35,6 +35,25 @@ #include "atom.h" /* + * Clear GPU surface registers. + */ +static void radeon_surface_init(struct radeon_device *rdev) +{ + /* FIXME: check this out */ + if (rdev->family < CHIP_R600) { + int i; + + for (i = 0; i < 8; i++) { + WREG32(RADEON_SURFACE0_INFO + + i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), + 0); + } + /* enable surfaces */ + WREG32(RADEON_SURFACE_CNTL, 0); + } +} + +/* * GPU scratch registers helpers function. 
*/ static void radeon_scratch_init(struct radeon_device *rdev) @@ -102,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev) if (rdev->mc.vram_location != 0xFFFFFFFFUL) { /* vram location was already setup try to put gtt after * if it fits */ - tmp = rdev->mc.vram_location + rdev->mc.vram_size; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { rdev->mc.gtt_location = tmp; @@ -117,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev) } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { /* gtt location was already setup try to put vram before * if it fits */ - if (rdev->mc.vram_size < rdev->mc.gtt_location) { + if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { rdev->mc.vram_location = 0; } else { tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; - tmp += (rdev->mc.vram_size - 1); - tmp &= ~(rdev->mc.vram_size - 1); - if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { + tmp += (rdev->mc.mc_vram_size - 1); + tmp &= ~(rdev->mc.mc_vram_size - 1); + if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { rdev->mc.vram_location = tmp; } else { printk(KERN_ERR "[drm] vram too big to fit " @@ -133,12 +152,16 @@ int radeon_mc_setup(struct radeon_device *rdev) } } else { rdev->mc.vram_location = 0; - rdev->mc.gtt_location = rdev->mc.vram_size; + tmp = rdev->mc.mc_vram_size; + tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); + rdev->mc.gtt_location = tmp; } - DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); + DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", rdev->mc.vram_location, - rdev->mc.vram_location + rdev->mc.vram_size - 1); + rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); + if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) + DRM_INFO("radeon: VRAM less than aperture workaround enabled\n"); DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", rdev->mc.gtt_location, @@ -202,25 +225,18 @@ void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) void radeon_register_accessor_init(struct radeon_device *rdev) { - rdev->mm_rreg = &r100_mm_rreg; - rdev->mm_wreg = &r100_mm_wreg; rdev->mc_rreg = &radeon_invalid_rreg; rdev->mc_wreg = &radeon_invalid_wreg; rdev->pll_rreg = &radeon_invalid_rreg; rdev->pll_wreg = &radeon_invalid_wreg; - rdev->pcie_rreg = &radeon_invalid_rreg; - rdev->pcie_wreg = &radeon_invalid_wreg; rdev->pciep_rreg = &radeon_invalid_rreg; rdev->pciep_wreg = &radeon_invalid_wreg; /* Don't change order as we are overridding accessor. */ if (rdev->family < CHIP_RV515) { - rdev->pcie_rreg = &rv370_pcie_rreg; - rdev->pcie_wreg = &rv370_pcie_wreg; - } - if (rdev->family >= CHIP_RV515) { - rdev->pcie_rreg = &rv515_pcie_rreg; - rdev->pcie_wreg = &rv515_pcie_wreg; + rdev->pcie_reg_mask = 0xff; + } else { + rdev->pcie_reg_mask = 0x7ff; } /* FIXME: not sure here */ if (rdev->family <= CHIP_R580) { @@ -433,6 +449,7 @@ int radeon_device_init(struct radeon_device *rdev, uint32_t flags) { int r, ret; + int dma_bits; DRM_INFO("radeon: Initializing kernel modesetting.\n"); rdev->shutdown = false; @@ -475,8 +492,20 @@ int radeon_device_init(struct radeon_device *rdev, return r; } - /* Report DMA addressing limitation */ - r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); + /* set DMA mask + need_dma32 flags. + * PCIE - can handle 40-bits. 
+ * IGP - can handle 40-bits (in theory) + * AGP - generally dma32 is safest + * PCI - only dma32 + */ + rdev->need_dma32 = false; + if (rdev->flags & RADEON_IS_AGP) + rdev->need_dma32 = true; + if (rdev->flags & RADEON_IS_PCI) + rdev->need_dma32 = true; + + dma_bits = rdev->need_dma32 ? 32 : 40; + r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); if (r) { printk(KERN_WARNING "radeon: No suitable DMA available.\n"); } @@ -496,6 +525,8 @@ int radeon_device_init(struct radeon_device *rdev, radeon_errata(rdev); /* Initialize scratch registers */ radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ @@ -527,27 +558,22 @@ int radeon_device_init(struct radeon_device *rdev, radeon_combios_asic_init(rdev->ddev); } } + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } /* Get vram informations */ radeon_vram_info(rdev); - /* Device is severly broken if aper size > vram size. - * for RN50/M6/M7 - Novell bug 204882 ? - */ - if (rdev->mc.vram_size < rdev->mc.aper_size) { - rdev->mc.aper_size = rdev->mc.vram_size; - } + /* Add an MTRR for the VRAM */ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, MTRR_TYPE_WRCOMB, 1); DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", - rdev->mc.vram_size >> 20, + rdev->mc.real_vram_size >> 20, (unsigned)rdev->mc.aper_size >> 20); DRM_INFO("RAM width %dbits %cDR\n", rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } /* Initialize memory controller (also test AGP) */ r = radeon_mc_init(rdev); if (r) { @@ -604,12 +630,12 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } - if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { - rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; - } if (!ret) { DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); } + if (radeon_testing) { + radeon_test_moves(rdev); + } if (radeon_benchmarking) { radeon_benchmark(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3efcf1a526b..a8fa1bb84cf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; + rdev->mode_info.crtcs[index] = radeon_crtc; radeon_crtc->mode_set.crtc = &radeon_crtc->base; radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); @@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll, tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; current_freq = radeon_div(tmp, ref_div * post_div); - error = abs(current_freq - freq); + if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { + error = freq - current_freq; + error = error < 0 ? 
0xffffffff : error; + } else + error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); if ((best_vco == 0 && error < best_error) || @@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev) } } -void radeon_init_disp_bandwidth(struct drm_device *dev) +bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct radeon_device *rdev = dev->dev_private; - struct drm_display_mode *modes[2]; - int pixel_bytes[2]; - struct drm_crtc *crtc; - - pixel_bytes[0] = pixel_bytes[1] = 0; - modes[0] = modes[1] = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_encoder *radeon_encoder; + bool first = true; - if (crtc->enabled && crtc->fb) { - modes[radeon_crtc->crtc_id] = &crtc->mode; - pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + radeon_encoder = to_radeon_encoder(encoder); + if (encoder->crtc != crtc) + continue; + if (first) { + radeon_crtc->rmx_type = radeon_encoder->rmx_type; + radeon_crtc->devices = radeon_encoder->devices; + memcpy(&radeon_crtc->native_mode, + &radeon_encoder->native_mode, + sizeof(struct radeon_native_mode)); + first = false; + } else { + if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { + /* WARNING: Right now this can't happen but + * in the future we need to check that scaling + * are consistent accross different encoder + * (ie all encoder can work with the same + * scaling). + */ + DRM_ERROR("Scaling not consistent accross encoder.\n"); + return false; + } } } - - if (ASIC_IS_AVIVO(rdev)) { - radeon_init_disp_bw_avivo(dev, - modes[0], - pixel_bytes[0], - modes[1], - pixel_bytes[1]); + if (radeon_crtc->rmx_type != RMX_OFF) { + fixed20_12 a, b; + a.full = rfixed_const(crtc->mode.vdisplay); + b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); + radeon_crtc->vsc.full = rfixed_div(a, b); + a.full = rfixed_const(crtc->mode.hdisplay); + b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); + radeon_crtc->hsc.full = rfixed_div(a, b); } else { - radeon_init_disp_bw_legacy(dev, - modes[0], - pixel_bytes[0], - modes[1], - pixel_bytes[1]); + radeon_crtc->vsc.full = rfixed_const(1); + radeon_crtc->hsc.full = rfixed_const(1); } + return true; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 09c9fb9f621..0bd5879a495 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -89,6 +89,7 @@ int radeon_agpmode = 0; int radeon_vram_limit = 0; int radeon_gart_size = 512; /* default gart size */ int radeon_benchmarking = 0; +int radeon_testing = 0; int radeon_connector_table = 0; #endif @@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600); MODULE_PARM_DESC(benchmark, "Run benchmark"); module_param_named(benchmark, radeon_benchmarking, int, 0444); +MODULE_PARM_DESC(test, "Run tests"); +module_param_named(test, radeon_testing, int, 0444); + MODULE_PARM_DESC(connector_table, "Force connector table"); module_param_named(connector_table, radeon_connector_table, int, 0444); #endif @@ -314,6 +318,14 @@ static int __init radeon_init(void) driver = &driver_old; driver->num_ioctls = radeon_max_ioctl; #if defined(CONFIG_DRM_RADEON_KMS) +#ifdef 
CONFIG_VGA_CONSOLE + if (vgacon_text_force() && radeon_modeset == -1) { + DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + driver = &driver_old; + driver->driver_features &= ~DRIVER_MODESET; + radeon_modeset = 0; + } +#endif /* if enabled by default */ if (radeon_modeset == -1) { DRM_INFO("radeon default to kernel modesetting.\n"); @@ -325,17 +337,8 @@ static int __init radeon_init(void) driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = radeon_max_kms_ioctl; } - /* if the vga console setting is enabled still * let modprobe override it */ -#ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && radeon_modeset == -1) { - DRM_INFO("VGACON disable radeon kernel modesetting.\n"); - driver = &driver_old; - driver->driver_features &= ~DRIVER_MODESET; - radeon_modeset = 0; - } -#endif #endif return drm_init(driver); } @@ -345,7 +348,7 @@ static void __exit radeon_exit(void) drm_exit(driver); } -late_initcall(radeon_init); +module_init(radeon_init); module_exit(radeon_exit); MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 127d0456f62..6fa32dac4e9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -100,9 +100,10 @@ * 1.28- Add support for VBL on CRTC2 * 1.29- R500 3D cmd buffer support * 1.30- Add support for occlusion queries + * 1.31- Add support for num Z pipes from GET_PARAM */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 30 +#define DRIVER_MINOR 31 #define DRIVER_PATCHLEVEL 0 /* @@ -143,6 +144,7 @@ enum radeon_family { CHIP_RV635, CHIP_RV670, CHIP_RS780, + CHIP_RS880, CHIP_RV770, CHIP_RV730, CHIP_RV710, @@ -328,6 +330,7 @@ typedef struct drm_radeon_private { resource_size_t fb_aper_offset; int num_gb_pipes; + int num_z_pipes; int track_flush; drm_local_map_t *mmio; @@ -688,6 +691,7 @@ extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pciga /* pipe config regs */ #define R400_GB_PIPE_SELECT 0x402c +#define RV530_GB_PIPE_SELECT2 0x4124 #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ #define R300_GB_TILE_CONFIG 0x4018 # define R300_ENABLE_TILING (1 << 0) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c8ef0d14ffa..0a92706eac1 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, if (mode->hdisplay < native_mode->panel_xres || mode->vdisplay < native_mode->panel_yres) { - radeon_encoder->flags |= RADEON_USE_RMX; if (ASIC_IS_AVIVO(rdev)) { adjusted_mode->hdisplay = native_mode->panel_xres; adjusted_mode->vdisplay = native_mode->panel_yres; @@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, } } + static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - radeon_encoder->flags &= ~RADEON_USE_RMX; - drm_mode_set_crtcinfo(adjusted_mode, 0); if (radeon_encoder->rmx_type != RMX_OFF) @@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) } -static void atom_rv515_force_tv_scaler(struct radeon_device *rdev) -{ - - WREG32(0x659C, 0x0); - WREG32(0x6594, 0x705); - WREG32(0x65A4, 0x10001); - WREG32(0x65D8, 0x0); - WREG32(0x65B0, 0x0); - WREG32(0x65C0, 0x0); - WREG32(0x65D4, 0x0); - WREG32(0x6578, 0x0); - WREG32(0x657C, 0x841880A8); - WREG32(0x6578, 0x1); - 
WREG32(0x657C, 0x84208680); - WREG32(0x6578, 0x2); - WREG32(0x657C, 0xBFF880B0); - WREG32(0x6578, 0x100); - WREG32(0x657C, 0x83D88088); - WREG32(0x6578, 0x101); - WREG32(0x657C, 0x84608680); - WREG32(0x6578, 0x102); - WREG32(0x657C, 0xBFF080D0); - WREG32(0x6578, 0x200); - WREG32(0x657C, 0x83988068); - WREG32(0x6578, 0x201); - WREG32(0x657C, 0x84A08680); - WREG32(0x6578, 0x202); - WREG32(0x657C, 0xBFF080F8); - WREG32(0x6578, 0x300); - WREG32(0x657C, 0x83588058); - WREG32(0x6578, 0x301); - WREG32(0x657C, 0x84E08660); - WREG32(0x6578, 0x302); - WREG32(0x657C, 0xBFF88120); - WREG32(0x6578, 0x400); - WREG32(0x657C, 0x83188040); - WREG32(0x6578, 0x401); - WREG32(0x657C, 0x85008660); - WREG32(0x6578, 0x402); - WREG32(0x657C, 0xBFF88150); - WREG32(0x6578, 0x500); - WREG32(0x657C, 0x82D88030); - WREG32(0x6578, 0x501); - WREG32(0x657C, 0x85408640); - WREG32(0x6578, 0x502); - WREG32(0x657C, 0xBFF88180); - WREG32(0x6578, 0x600); - WREG32(0x657C, 0x82A08018); - WREG32(0x6578, 0x601); - WREG32(0x657C, 0x85808620); - WREG32(0x6578, 0x602); - WREG32(0x657C, 0xBFF081B8); - WREG32(0x6578, 0x700); - WREG32(0x657C, 0x82608010); - WREG32(0x6578, 0x701); - WREG32(0x657C, 0x85A08600); - WREG32(0x6578, 0x702); - WREG32(0x657C, 0x800081F0); - WREG32(0x6578, 0x800); - WREG32(0x657C, 0x8228BFF8); - WREG32(0x6578, 0x801); - WREG32(0x657C, 0x85E085E0); - WREG32(0x6578, 0x802); - WREG32(0x657C, 0xBFF88228); - WREG32(0x6578, 0x10000); - WREG32(0x657C, 0x82A8BF00); - WREG32(0x6578, 0x10001); - WREG32(0x657C, 0x82A08CC0); - WREG32(0x6578, 0x10002); - WREG32(0x657C, 0x8008BEF8); - WREG32(0x6578, 0x10100); - WREG32(0x657C, 0x81F0BF28); - WREG32(0x6578, 0x10101); - WREG32(0x657C, 0x83608CA0); - WREG32(0x6578, 0x10102); - WREG32(0x657C, 0x8018BED0); - WREG32(0x6578, 0x10200); - WREG32(0x657C, 0x8148BF38); - WREG32(0x6578, 0x10201); - WREG32(0x657C, 0x84408C80); - WREG32(0x6578, 0x10202); - WREG32(0x657C, 0x8008BEB8); - WREG32(0x6578, 0x10300); - WREG32(0x657C, 0x80B0BF78); - WREG32(0x6578, 0x10301); - WREG32(0x657C, 0x85008C20); - WREG32(0x6578, 0x10302); - WREG32(0x657C, 0x8020BEA0); - WREG32(0x6578, 0x10400); - WREG32(0x657C, 0x8028BF90); - WREG32(0x6578, 0x10401); - WREG32(0x657C, 0x85E08BC0); - WREG32(0x6578, 0x10402); - WREG32(0x657C, 0x8018BE90); - WREG32(0x6578, 0x10500); - WREG32(0x657C, 0xBFB8BFB0); - WREG32(0x6578, 0x10501); - WREG32(0x657C, 0x86C08B40); - WREG32(0x6578, 0x10502); - WREG32(0x657C, 0x8010BE90); - WREG32(0x6578, 0x10600); - WREG32(0x657C, 0xBF58BFC8); - WREG32(0x6578, 0x10601); - WREG32(0x657C, 0x87A08AA0); - WREG32(0x6578, 0x10602); - WREG32(0x657C, 0x8010BE98); - WREG32(0x6578, 0x10700); - WREG32(0x657C, 0xBF10BFF0); - WREG32(0x6578, 0x10701); - WREG32(0x657C, 0x886089E0); - WREG32(0x6578, 0x10702); - WREG32(0x657C, 0x8018BEB0); - WREG32(0x6578, 0x10800); - WREG32(0x657C, 0xBED8BFE8); - WREG32(0x6578, 0x10801); - WREG32(0x657C, 0x89408940); - WREG32(0x6578, 0x10802); - WREG32(0x657C, 0xBFE8BED8); - WREG32(0x6578, 0x20000); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20001); - WREG32(0x657C, 0x90008000); - WREG32(0x6578, 0x20002); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20003); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20100); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20101); - WREG32(0x657C, 0x8FE0BF70); - WREG32(0x6578, 0x20102); - WREG32(0x657C, 0xBFE880C0); - WREG32(0x6578, 0x20103); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20200); - WREG32(0x657C, 0x8018BFF8); - WREG32(0x6578, 0x20201); - WREG32(0x657C, 0x8F80BF08); - WREG32(0x6578, 0x20202); - WREG32(0x657C, 
0xBFD081A0); - WREG32(0x6578, 0x20203); - WREG32(0x657C, 0xBFF88000); - WREG32(0x6578, 0x20300); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20301); - WREG32(0x657C, 0x8EE0BEC0); - WREG32(0x6578, 0x20302); - WREG32(0x657C, 0xBFB082A0); - WREG32(0x6578, 0x20303); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20400); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20401); - WREG32(0x657C, 0x8E00BEA0); - WREG32(0x6578, 0x20402); - WREG32(0x657C, 0xBF8883C0); - WREG32(0x6578, 0x20403); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20500); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20501); - WREG32(0x657C, 0x8D00BE90); - WREG32(0x6578, 0x20502); - WREG32(0x657C, 0xBF588500); - WREG32(0x6578, 0x20503); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20600); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20601); - WREG32(0x657C, 0x8BC0BE98); - WREG32(0x6578, 0x20602); - WREG32(0x657C, 0xBF308660); - WREG32(0x6578, 0x20603); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20700); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20701); - WREG32(0x657C, 0x8A80BEB0); - WREG32(0x6578, 0x20702); - WREG32(0x657C, 0xBF0087C0); - WREG32(0x6578, 0x20703); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20800); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20801); - WREG32(0x657C, 0x8920BED0); - WREG32(0x6578, 0x20802); - WREG32(0x657C, 0xBED08920); - WREG32(0x6578, 0x20803); - WREG32(0x657C, 0x80008010); - WREG32(0x6578, 0x30000); - WREG32(0x657C, 0x90008000); - WREG32(0x6578, 0x30001); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x30100); - WREG32(0x657C, 0x8FE0BF90); - WREG32(0x6578, 0x30101); - WREG32(0x657C, 0xBFF880A0); - WREG32(0x6578, 0x30200); - WREG32(0x657C, 0x8F60BF40); - WREG32(0x6578, 0x30201); - WREG32(0x657C, 0xBFE88180); - WREG32(0x6578, 0x30300); - WREG32(0x657C, 0x8EC0BF00); - WREG32(0x6578, 0x30301); - WREG32(0x657C, 0xBFC88280); - WREG32(0x6578, 0x30400); - WREG32(0x657C, 0x8DE0BEE0); - WREG32(0x6578, 0x30401); - WREG32(0x657C, 0xBFA083A0); - WREG32(0x6578, 0x30500); - WREG32(0x657C, 0x8CE0BED0); - WREG32(0x6578, 0x30501); - WREG32(0x657C, 0xBF7884E0); - WREG32(0x6578, 0x30600); - WREG32(0x657C, 0x8BA0BED8); - WREG32(0x6578, 0x30601); - WREG32(0x657C, 0xBF508640); - WREG32(0x6578, 0x30700); - WREG32(0x657C, 0x8A60BEE8); - WREG32(0x6578, 0x30701); - WREG32(0x657C, 0xBF2087A0); - WREG32(0x6578, 0x30800); - WREG32(0x657C, 0x8900BF00); - WREG32(0x6578, 0x30801); - WREG32(0x657C, 0xBF008900); -} - static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { @@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) } static void -atombios_overscan_setup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - SET_CRTC_OVERSCAN_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); - - memset(&args, 0, sizeof(args)); - - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; - args.ucCRTC = radeon_crtc->crtc_id; - - if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type == RMX_FULL) { - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; - } else if (radeon_encoder->rmx_type == RMX_CENTER) { 
- args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - } else if (radeon_encoder->rmx_type == RMX_ASPECT) { - int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; - int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; - - if (a1 > a2) { - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; - } else if (a2 > a1) { - args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; - } - } - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} - -static void -atombios_scaler_setup(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - ENABLE_SCALER_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); - /* fixme - fill in enc_priv for atom dac */ - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) - return; - - memset(&args, 0, sizeof(args)); - - args.ucScaler = radeon_crtc->crtc_id; - - if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { - switch (tv_std) { - case TV_STD_NTSC: - default: - args.ucTVStandard = ATOM_TV_NTSC; - break; - case TV_STD_PAL: - args.ucTVStandard = ATOM_TV_PAL; - break; - case TV_STD_PAL_M: - args.ucTVStandard = ATOM_TV_PALM; - break; - case TV_STD_PAL_60: - args.ucTVStandard = ATOM_TV_PAL60; - break; - case TV_STD_NTSC_J: - args.ucTVStandard = ATOM_TV_NTSCJ; - break; - case TV_STD_SCART_PAL: - args.ucTVStandard = ATOM_TV_PAL; /* ??? 
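As an aside on the aspect-ratio branch of the removed atombios_overscan_setup() above (which, as quoted, writes the Left/Right fields in both cases): the border math is the usual cross-multiplication that avoids floating point. A minimal user-space sketch of the idea; the numbers in main() are invented for illustration.

```c
/*
 * Pillarbox/letterbox border computation: a1 = src_v * dst_h and
 * a2 = dst_v * src_h compare the two aspect ratios without division.
 * Plain C99 sketch, not driver code.
 */
#include <stdio.h>

static void aspect_borders(int src_h, int src_v, int dst_h, int dst_v,
                           int *lr, int *tb)
{
        long a1 = (long)src_v * dst_h;
        long a2 = (long)dst_v * src_h;

        *lr = 0;
        *tb = 0;
        if (a1 > a2)            /* destination is relatively wider: pillarbox */
                *lr = (dst_h - (int)(a2 / src_v)) / 2;
        else if (a2 > a1)       /* destination is relatively taller: letterbox */
                *tb = (dst_v - (int)(a1 / src_h)) / 2;
}

int main(void)
{
        int lr, tb;

        aspect_borders(1280, 1024, 1920, 1080, &lr, &tb); /* 5:4 image on a 16:9 panel */
        printf("left/right %d, top/bottom %d\n", lr, tb);  /* -> 285, 0 */
        return 0;
}
```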
*/ - break; - case TV_STD_SECAM: - args.ucTVStandard = ATOM_TV_SECAM; - break; - case TV_STD_PAL_CN: - args.ucTVStandard = ATOM_TV_PALCN; - break; - } - args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; - } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { - args.ucTVStandard = ATOM_TV_CV; - args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; - } else if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type == RMX_FULL) - args.ucEnable = ATOM_SCALER_EXPANSION; - else if (radeon_encoder->rmx_type == RMX_CENTER) - args.ucEnable = ATOM_SCALER_CENTER; - else if (radeon_encoder->rmx_type == RMX_ASPECT) - args.ucEnable = ATOM_SCALER_EXPANSION; - } else { - if (ASIC_IS_AVIVO(rdev)) - args.ucEnable = ATOM_SCALER_DISABLE; - else - args.ucEnable = ATOM_SCALER_CENTER; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) - && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { - atom_rv515_force_tv_scaler(rdev); - } - -} - -static void radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, radeon_encoder->pixel_clock = adjusted_mode->clock; radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); - atombios_overscan_setup(encoder, mode, adjusted_mode); - atombios_scaler_setup(encoder); atombios_set_encoder_crtc_source(encoder); if (ASIC_IS_AVIVO(rdev)) { @@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su radeon_encoder->encoder_id = encoder_id; radeon_encoder->devices = supported_device; + radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fa86d398945..ec383edf5f3 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno, break; case 24: case 32: - fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | - (green & 0xff00) | - ((blue & 0xff00) >> 8); + fb->pseudo_palette[regno] = + (((red >> 8) & 0xff) << info->var.red.offset) | + (((green >> 8) & 0xff) << info->var.green.offset) | + (((blue >> 8) & 0xff) << info->var.blue.offset); break; } } @@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, var->transp.length = 0; var->transp.offset = 0; break; +#ifdef __LITTLE_ENDIAN case 15: var->red.offset = 10; var->green.offset = 5; @@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, var->transp.length = 8; var->transp.offset = 24; break; +#else + case 24: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 0; + break; +#endif default: return -EINVAL; } @@ -447,10 +471,10 @@ static struct notifier_block paniced = { .notifier_call = radeonfb_panic, }; -static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) +static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int 
aligned = width; - int align_large = (ASIC_IS_AVIVO(rdev)); + int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { @@ -478,36 +502,42 @@ int radeonfb_create(struct radeon_device *rdev, { struct fb_info *info; struct radeon_fb_device *rfbdev; - struct drm_framebuffer *fb; + struct drm_framebuffer *fb = NULL; struct radeon_framebuffer *rfb; struct drm_mode_fb_cmd mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_object *robj = NULL; struct device *device = &rdev->pdev->dev; int size, aligned_size, ret; + u64 fb_gpuaddr; void *fbptr = NULL; + unsigned long tmp; + bool fb_tiled = false; /* useful for testing */ mode_cmd.width = surface_width; mode_cmd.height = surface_height; mode_cmd.bpp = 32; /* need to align pitch with crtc limits */ - mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); + mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); mode_cmd.depth = 24; size = mode_cmd.pitch * mode_cmd.height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, - RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_kernel, - false, &gobj); + RADEON_GEM_DOMAIN_VRAM, + false, ttm_bo_type_kernel, + false, &gobj); if (ret) { - printk(KERN_ERR "failed to allocate framebuffer\n"); + printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", + surface_width, surface_height); ret = -ENOMEM; goto out; } robj = gobj->driver_private; + if (fb_tiled) + radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -515,12 +545,19 @@ int radeonfb_create(struct radeon_device *rdev, ret = -ENOMEM; goto out_unref; } + ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); + if (ret) { + printk(KERN_ERR "failed to pin framebuffer\n"); + ret = -ENOMEM; + goto out_unref; + } list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); rfb = to_radeon_framebuffer(fb); *rfb_p = rfb; rdev->fbdev_rfb = rfb; + rdev->fbdev_robj = robj; info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); if (info == NULL) { @@ -529,11 +566,16 @@ int radeonfb_create(struct radeon_device *rdev, } rfbdev = info->par; + if (fb_tiled) + radeon_object_check_tiling(robj, 0, 0); + ret = radeon_object_kmap(robj, &fbptr); if (ret) { goto out_unref; } + memset_io(fbptr, 0, aligned_size); + strcpy(info->fix.id, "radeondrmfb"); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; @@ -541,13 +583,13 @@ int radeonfb_create(struct radeon_device *rdev, info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_I830; + info->fix.accel = FB_ACCEL_NONE; info->fix.type_aux = 0; info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; info->fix.line_length = fb->pitch; - info->screen_base = fbptr; - info->fix.smem_start = (unsigned long)fbptr; + tmp = fb_gpuaddr - rdev->mc.vram_location; + info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = size; info->screen_base = fbptr; info->screen_size = size; @@ -562,8 +604,13 @@ int radeonfb_create(struct radeon_device *rdev, info->var.width = -1; info->var.xres = fb_width; info->var.yres = fb_height; - info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); - info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); + + /* 
setup aperture base/size for vesafb takeover */ + info->aperture_base = rdev->ddev->mode_config.fb_base; + info->aperture_size = rdev->mc.real_vram_size; + + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; @@ -590,6 +637,7 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.offset = 0; info->var.transp.length = 0; break; +#ifdef __LITTLE_ENDIAN case 15: info->var.red.offset = 10; info->var.green.offset = 5; @@ -629,7 +677,29 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.offset = 24; info->var.transp.length = 8; break; +#else + case 24: + info->var.red.offset = 8; + info->var.green.offset = 16; + info->var.blue.offset = 24; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 0; + break; + case 32: + info->var.red.offset = 8; + info->var.green.offset = 16; + info->var.blue.offset = 24; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 8; + break; default: +#endif break; } @@ -644,7 +714,7 @@ out_unref: if (robj) { radeon_object_kunmap(robj); } - if (ret) { + if (fb && ret) { list_del(&fb->filp_head); drm_gem_object_unreference(gobj); drm_framebuffer_cleanup(fb); @@ -813,6 +883,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) robj = rfb->obj->driver_private; unregister_framebuffer(info); radeon_object_kunmap(robj); + radeon_object_unpin(robj); framebuffer_release(info); } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2a..b4e48dd2e85 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -195,7 +195,7 @@ retry: r = wait_event_interruptible_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); if (unlikely(r == -ERESTARTSYS)) { - return -ERESTART; + return -EBUSY; } } else { r = wait_event_timeout(rdev->fence_drv.queue, diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d343a15316e..2977539880f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, return -ENOMEM; } rdev->gart.pages[p] = pagelist[i]; - page_base = (uint32_t)rdev->gart.pages_addr[p]; + page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { radeon_gart_set_page(rdev, t, page_base); page_base += 4096; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235..d880edf254d 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - args->vram_size = rdev->mc.vram_size; + args->vram_size = rdev->mc.real_vram_size; /* FIXME: report somethings that makes sense */ - args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); + args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); args->gart_size = rdev->mc.gtt_size; return 0; } @@ -262,8 +262,34 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - /* FIXME: implement */ - 
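A small illustration of the address translation the radeonfb_create() changes above use for fix.smem_start: the CPU sees a buffer pinned in VRAM at its offset from the GPU's VRAM base, relocated into the PCI aperture. All numeric values below are made up for the example.

```c
/* Sketch only: user-space C with invented addresses, not driver code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t vram_location = 0x0;          /* GPU address where VRAM starts   */
        uint64_t aper_base     = 0xd0000000;   /* PCI BAR of the VRAM aperture    */
        uint64_t fb_gpuaddr    = 0x00200000;   /* GPU address of the pinned FB    */

        /* same shape as: smem_start = aper_base + (fb_gpuaddr - vram_location) */
        uint64_t smem_start = aper_base + (fb_gpuaddr - vram_location);

        printf("CPU-visible framebuffer at 0x%llx\n",
               (unsigned long long)smem_start);
        return 0;
}
```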
return 0; + struct drm_radeon_gem_busy *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r; + uint32_t cur_placement; + + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) { + return -EINVAL; + } + robj = gobj->driver_private; + r = radeon_object_busy_domain(robj, &cur_placement); + switch (cur_placement) { + case TTM_PL_VRAM: + args->domain = RADEON_GEM_DOMAIN_VRAM; + break; + case TTM_PL_TT: + args->domain = RADEON_GEM_DOMAIN_GTT; + break; + case TTM_PL_SYSTEM: + args->domain = RADEON_GEM_DOMAIN_CPU; + default: + break; + } + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; } int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, @@ -285,3 +311,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return r; } + +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_set_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("%d \n", args->handle); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} + +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_get_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("\n"); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_get_tiling_flags(robj, &args->tiling_flags, + &args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 491d569deb0..9805e4b6ca1 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -32,60 +32,6 @@ #include "radeon.h" #include "atom.h" -static inline uint32_t r100_irq_ack(struct radeon_device *rdev) -{ - uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); - uint32_t irq_mask = RADEON_SW_INT_TEST; - - if (irqs) { - WREG32(RADEON_GEN_INT_STATUS, irqs); - } - return irqs & irq_mask; -} - -int r100_irq_set(struct radeon_device *rdev) -{ - uint32_t tmp = 0; - - if (rdev->irq.sw_int) { - tmp |= RADEON_SW_INT_ENABLE; - } - /* Todo go through CRTC and enable vblank int or not */ - WREG32(RADEON_GEN_INT_CNTL, tmp); - return 0; -} - -int r100_irq_process(struct radeon_device *rdev) -{ - uint32_t status; - - status = r100_irq_ack(rdev); - if (!status) { - return IRQ_NONE; - } - while (status) { - /* SW interrupt */ - if (status & RADEON_SW_INT_TEST) { - radeon_fence_process(rdev); - } - status = r100_irq_ack(rdev); - } - return IRQ_HANDLED; -} - -int rs600_irq_set(struct radeon_device *rdev) -{ - uint32_t tmp = 0; - - if (rdev->irq.sw_int) { - tmp |= RADEON_SW_INT_ENABLE; - } - WREG32(RADEON_GEN_INT_CNTL, tmp); - /* Todo go through CRTC and enable vblank int or not */ - WREG32(R500_DxMODE_INT_MASK, 0); - return 0; -} - irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; diff --git 
a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d..dce09ada32b 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (r) { DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); radeon_device_fini(rdev); + kfree(rdev); + dev->dev_private = NULL; return r; } return 0; @@ -93,6 +95,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case RADEON_INFO_NUM_GB_PIPES: value = rdev->num_gb_pipes; break; + case RADEON_INFO_NUM_Z_PIPES: + value = rdev->num_z_pipes; + break; default: DRM_DEBUG("Invalid request %d\n", info->request); return -EINVAL; @@ -139,19 +144,42 @@ void radeon_driver_preclose_kms(struct drm_device *dev, */ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { - /* FIXME: implement */ - return 0; + struct radeon_device *rdev = dev->dev_private; + + if (crtc < 0 || crtc > 1) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + return radeon_get_vblank_counter(rdev, crtc); } int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) { - /* FIXME: implement */ - return 0; + struct radeon_device *rdev = dev->dev_private; + + if (crtc < 0 || crtc > 1) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + rdev->irq.crtc_vblank_int[crtc] = true; + + return radeon_irq_set(rdev); } void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) { - /* FIXME: implement */ + struct radeon_device *rdev = dev->dev_private; + + if (crtc < 0 || crtc > 1) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return; + } + + rdev->irq.crtc_vblank_int[crtc] = false; + + radeon_irq_set(rdev); } @@ -291,5 +319,8 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8086ecf7f03..0da72f18fd3 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -29,6 +29,171 @@ #include "radeon_fixed.h" #include "radeon.h" +static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + int xres = mode->hdisplay; + int yres = mode->vdisplay; + bool hscale = true, vscale = true; + int hsync_wid; + int vsync_wid; + int hsync_start; + int blank_width; + u32 scale, inc, crtc_more_cntl; + u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; + u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; + u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; + struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; + + fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & + (RADEON_VERT_STRETCH_RESERVED | + RADEON_VERT_AUTO_RATIO_INC); + fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & + (RADEON_HORZ_FP_LOOP_STRETCH 
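The new vblank hooks in the radeon_kms.c hunk above follow a simple pattern: validate the CRTC index, record the request in driver state, then rewrite the interrupt mask from that recorded state. A rough user-space sketch, with irq_set() as a stand-in for radeon_irq_set():

```c
#include <stdbool.h>
#include <stdio.h>

struct irq_state { bool crtc_vblank_int[2]; };

/* stand-in for the hardware programming step */
static int irq_set(struct irq_state *s)
{
        printf("vblank irq: crtc0=%d crtc1=%d\n",
               s->crtc_vblank_int[0], s->crtc_vblank_int[1]);
        return 0;
}

static int enable_vblank(struct irq_state *s, int crtc)
{
        if (crtc < 0 || crtc > 1)
                return -1;              /* invalid CRTC, mirrors the -EINVAL path above */

        s->crtc_vblank_int[crtc] = true;
        return irq_set(s);              /* reprogram from recorded state */
}

int main(void)
{
        struct irq_state s = { { false, false } };

        return enable_vblank(&s, 0);
}
```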
| + RADEON_HORZ_AUTO_RATIO_INC); + + crtc_more_cntl = 0; + if ((rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) { + /* This is to workaround the asic bug for RMX, some versions + of BIOS dosen't have this register initialized correctly. */ + crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; + } + + + fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + hsync_start = mode->crtc_hsync_start - 8; + + fp_h_sync_strt_wid = ((hsync_start & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0)); + + fp_horz_vert_active = 0; + + if (native_mode->panel_xres == 0 || + native_mode->panel_yres == 0) { + hscale = false; + vscale = false; + } else { + if (xres > native_mode->panel_xres) + xres = native_mode->panel_xres; + if (yres > native_mode->panel_yres) + yres = native_mode->panel_yres; + + if (xres == native_mode->panel_xres) + hscale = false; + if (yres == native_mode->panel_yres) + vscale = false; + } + + switch (radeon_crtc->rmx_type) { + case RMX_FULL: + case RMX_ASPECT: + if (!hscale) + fp_horz_stretch |= ((xres/8-1) << 16); + else { + inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; + scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) + / native_mode->panel_xres + 1; + fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | + RADEON_HORZ_STRETCH_BLEND | + RADEON_HORZ_STRETCH_ENABLE | + ((native_mode->panel_xres/8-1) << 16)); + } + + if (!vscale) + fp_vert_stretch |= ((yres-1) << 12); + else { + inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; + scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) + / native_mode->panel_yres + 1; + fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | + RADEON_VERT_STRETCH_ENABLE | + RADEON_VERT_STRETCH_BLEND | + ((native_mode->panel_yres-1) << 12)); + } + break; + case RMX_CENTER: + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + + crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | + RADEON_CRTC_AUTO_VERT_CENTER_EN); + + blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; + if (blank_width > 110) + blank_width = 110; + + fp_crtc_h_total_disp = (((blank_width) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + + fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? 
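The RMX stretch values in the code above are plain ratio arithmetic: source size over panel size, scaled into the hardware's fixed ratio range. A sketch assuming a ratio full-scale of 4096; the actual RADEON_*_STRETCH_RATIO_MAX value is not shown in this hunk, so treat that constant as illustrative.

```c
#include <stdio.h>

#define RATIO_MAX 4096u   /* assumption: stands in for RADEON_HORZ_STRETCH_RATIO_MAX */

/* same shape as: scale = ((xres + inc) * RATIO_MAX) / panel_xres + 1 */
static unsigned stretch_ratio(unsigned xres, unsigned panel_xres, unsigned inc)
{
        return (xres + inc) * RATIO_MAX / panel_xres + 1;
}

int main(void)
{
        /* a 1024-wide mode stretched to a 1280-wide panel */
        printf("ratio = %u (of %u)\n", stretch_ratio(1024, 1280, 0), RATIO_MAX);
        return 0;
}
```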
RADEON_CRTC_V_SYNC_POL + : 0))); + + fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | + (((native_mode->panel_xres / 8) & 0x1ff) << 16)); + break; + case RMX_OFF: + default: + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + break; + } + + WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); + WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); + WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); + WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); + WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); + WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); + WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); + WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); +} + void radeon_restore_common_regs(struct drm_device *dev) { /* don't need this yet */ @@ -145,10 +310,13 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) RADEON_CRTC_DISP_REQ_EN_B)); WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); } + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); else { @@ -158,10 +326,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) } break; } - - if (mode != DRM_MODE_DPMS_OFF) { - radeon_crtc_load_lut(crtc); - } } /* properly set crtc bpp when using atombios */ @@ -235,6 +399,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, uint64_t base; uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_pitch, pitch_pixels; + uint32_t tiling_flags; DRM_DEBUG("\n"); @@ -244,7 +409,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { return -EINVAL; } - crtc_offset = (u32)base; + /* if scanout was in GTT this really wouldn't work */ + /* crtc offset is from display base addr not FB location */ + radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; + + base -= radeon_crtc->legacy_display_base_addr; + crtc_offset_cntl = 0; pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); @@ -253,8 +423,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, (crtc->fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; - /* TODO tiling */ - if (0) { + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MICRO) + DRM_ERROR("trying to scanout microtiled buffer\n"); + + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | R300_CRTC_MICRO_TILE_BUFFER_DIS | @@ -270,15 +444,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; } - - /* TODO more tiling */ - if (0) { + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) { crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { int byteshift = crtc->fb->bits_per_pixel >> 4; - int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; + int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } @@ -303,11 +475,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, base &= ~7; - /* update sarea TODO */ - crtc_offset = (u32)base; - WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, 
rdev->mc.vram_location); + WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); if (ASIC_IS_R300(rdev)) { if (radeon_crtc->crtc_id) @@ -751,6 +921,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; return true; } @@ -759,16 +931,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { - - DRM_DEBUG("\n"); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; /* TODO TV */ - radeon_crtc_set_base(crtc, x, y, old_fb); radeon_set_crtc_timing(crtc, adjusted_mode); radeon_set_pll(crtc, adjusted_mode); - radeon_init_disp_bandwidth(crtc->dev); - + radeon_bandwidth_update(rdev); + if (radeon_crtc->crtc_id == 0) { + radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); + } else { + if (radeon_crtc->rmx_type != RMX_OFF) { + /* FIXME: only first crtc has rmx what should we + * do ? + */ + DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); + } + } return 0; } @@ -799,478 +980,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev, radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); } - -void radeon_init_disp_bw_legacy(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2) -{ - struct radeon_device *rdev = dev->dev_private; - fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; - fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; - uint32_t temp, data, mem_trcd, mem_trp, mem_tras; - fixed20_12 memtcas_ff[8] = { - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init(0), - }; - fixed20_12 memtcas_rs480_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init_half(3), - }; - fixed20_12 memtcas2_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), - }; - fixed20_12 memtrbs[8] = { - fixed_init(1), - fixed_init_half(1), - fixed_init(2), - fixed_init_half(2), - fixed_init(3), - fixed_init_half(3), - fixed_init(4), - fixed_init_half(4) - }; - fixed20_12 memtrbs_r4xx[8] = { - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), - fixed_init(8), - fixed_init(9), - fixed_init(10), - fixed_init(11) - }; - fixed20_12 min_mem_eff; - fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; - fixed20_12 cur_latency_mclk, cur_latency_sclk; - fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, - disp_drain_rate2, read_return_rate; - fixed20_12 time_disp1_drop_priority; - int c; - int cur_size = 16; /* in octawords */ - int critical_point = 0, critical_point2; -/* uint32_t read_return_rate, time_disp1_drop_priority; */ - int stop_req, max_stop_req; - - min_mem_eff.full = rfixed_const_8(0); - /* get modes */ - if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { - uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); - mc_init_misc_lat_timer &= 
~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); - mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); - /* check crtc enables */ - if (mode2) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); - if (mode1) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); - WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); - } - - /* - * determine is there is enough bw for current mode - */ - mclk_ff.full = rfixed_const(rdev->clock.default_mclk); - temp_ff.full = rfixed_const(100); - mclk_ff.full = rfixed_div(mclk_ff, temp_ff); - sclk_ff.full = rfixed_const(rdev->clock.default_sclk); - sclk_ff.full = rfixed_div(sclk_ff, temp_ff); - - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); - temp_ff.full = rfixed_const(temp); - mem_bw.full = rfixed_mul(mclk_ff, temp_ff); - - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { - temp_ff.full = rfixed_const(1000); - pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = rfixed_div(pix_clk, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes1); - peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); - } - if (mode2) { - temp_ff.full = rfixed_const(1000); - pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = rfixed_div(pix_clk2, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes2); - peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); - } - - mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); - } - - /* Get values from the EXT_MEM_CNTL register...converting its contents. 
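The removed bandwidth code above leans on 20.12 fixed-point helpers (rfixed_const/rfixed_mul/rfixed_div). A self-contained sketch of that arithmetic, with fx_* helpers standing in for the driver's struct-based fixed20_12 API, and clock values treated as 10 kHz units as the divide-by-100 above suggests:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t fixed20_12;   /* simplified stand-in: integer in bits 31:12, fraction in 11:0 */

static fixed20_12 fx_const(uint32_t v) { return v << 12; }
static fixed20_12 fx_mul(fixed20_12 a, fixed20_12 b)
{
        return (fixed20_12)(((uint64_t)a * b) >> 12);
}
static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
        return (fixed20_12)(((uint64_t)a << 12) / b);
}
static uint32_t fx_trunc(fixed20_12 a) { return a >> 12; }

int main(void)
{
        fixed20_12 mclk = fx_div(fx_const(40000), fx_const(100));    /* 400 MHz memory clock */
        fixed20_12 bus  = fx_const(128 / 8 * 2);                     /* 128-bit DDR: 32 B/clk */
        fixed20_12 mem_bw = fx_mul(mclk, bus);

        fixed20_12 pixclk = fx_div(fx_const(154000), fx_const(1000)); /* 154 MHz dot clock */
        fixed20_12 peak   = fx_mul(pixclk, fx_const(4));              /* 32 bpp scanout */

        printf("mem_bw %u MB/s, peak display %u MB/s\n",
               fx_trunc(mem_bw), fx_trunc(peak));
        return 0;
}
```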
*/ - temp = RREG32(RADEON_MEM_TIMING_CNTL); - if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ - mem_trcd = ((temp >> 2) & 0x3) + 1; - mem_trp = ((temp & 0x3)) + 1; - mem_tras = ((temp & 0x70) >> 4) + 1; - } else if (rdev->family == CHIP_R300 || - rdev->family == CHIP_R350) { /* r300, r350 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 11) & 0xf) + 4; - } else if (rdev->family == CHIP_RV350 || - rdev->family <= CHIP_RV380) { - /* rv3x0 */ - mem_trcd = (temp & 0x7) + 3; - mem_trp = ((temp >> 8) & 0x7) + 3; - mem_tras = ((temp >> 11) & 0xf) + 6; - } else if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) { - /* r4xx */ - mem_trcd = (temp & 0xf) + 3; - if (mem_trcd > 15) - mem_trcd = 15; - mem_trp = ((temp >> 8) & 0xf) + 3; - if (mem_trp > 15) - mem_trp = 15; - mem_tras = ((temp >> 12) & 0x1f) + 6; - if (mem_tras > 31) - mem_tras = 31; - } else { /* RV200, R200 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 12) & 0xf) + 4; - } - /* convert to FF */ - trcd_ff.full = rfixed_const(mem_trcd); - trp_ff.full = rfixed_const(mem_trp); - tras_ff.full = rfixed_const(mem_tras); - - /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ - temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); - data = (temp & (7 << 20)) >> 20; - if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { - if (rdev->family == CHIP_RS480) /* don't think rs400 */ - tcas_ff = memtcas_rs480_ff[data]; - else - tcas_ff = memtcas_ff[data]; - } else - tcas_ff = memtcas2_ff[data]; - - if (rdev->family == CHIP_RS400 || - rdev->family == CHIP_RS480) { - /* extra cas latency stored in bits 23-25 0-4 clocks */ - data = (temp >> 23) & 0x7; - if (data < 5) - tcas_ff.full += rfixed_const(data); - } - - if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { - /* on the R300, Tcas is included in Trbs. - */ - temp = RREG32(RADEON_MEM_CNTL); - data = (R300_MEM_NUM_CHANNELS_MASK & temp); - if (data == 1) { - if (R300_MEM_USE_CD_CH_ONLY & temp) { - temp = RREG32(R300_MC_IND_INDEX); - temp &= ~R300_MC_IND_ADDR_MASK; - temp |= R300_MC_READ_CNTL_CD_mcind; - WREG32(R300_MC_IND_INDEX, temp); - temp = RREG32(R300_MC_IND_DATA); - data = (R300_MEM_RBS_POSITION_C_MASK & temp); - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - if (rdev->family == CHIP_RV410 || - rdev->family == CHIP_R420 || - rdev->family == CHIP_R423) - trbs_ff = memtrbs_r4xx[data]; - else - trbs_ff = memtrbs[data]; - tcas_ff.full += trbs_ff.full; - } - - sclk_eff_ff.full = sclk_ff.full; - - if (rdev->flags & RADEON_IS_AGP) { - fixed20_12 agpmode_ff; - agpmode_ff.full = rfixed_const(radeon_agpmode); - temp_ff.full = rfixed_const_666(16); - sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); - } - /* TODO PCIE lanes may affect this - agpmode == 16?? 
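Decoding the memory timings above is straight bitfield extraction, with field positions that differ per ASIC family. A sketch mirroring just the r300/r350 case quoted in the hunk:

```c
#include <stdint.h>
#include <stdio.h>

struct mem_timings { unsigned trcd, trp, tras; };

/* field layout copied from the r300/r350 branch above; other families differ */
static struct mem_timings decode_r300(uint32_t reg)
{
        struct mem_timings t;

        t.trcd = (reg & 0x7) + 1;          /* bits 2:0   */
        t.trp  = ((reg >> 8) & 0x7) + 1;   /* bits 10:8  */
        t.tras = ((reg >> 11) & 0xf) + 4;  /* bits 14:11 */
        return t;
}

int main(void)
{
        struct mem_timings t = decode_r300(0x00002a05);  /* invented register value */

        printf("tRCD %u, tRP %u, tRAS %u\n", t.trcd, t.trp, t.tras);
        return 0;
}
```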
*/ - - if (ASIC_IS_R300(rdev)) { - sclk_delay_ff.full = rfixed_const(250); - } else { - if ((rdev->family == CHIP_RV100) || - rdev->flags & RADEON_IS_IGP) { - if (rdev->mc.vram_is_ddr) - sclk_delay_ff.full = rfixed_const(41); - else - sclk_delay_ff.full = rfixed_const(33); - } else { - if (rdev->mc.vram_width == 128) - sclk_delay_ff.full = rfixed_const(57); - else - sclk_delay_ff.full = rfixed_const(41); - } - } - - mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); - - if (rdev->mc.vram_is_ddr) { - if (rdev->mc.vram_width == 32) { - k1.full = rfixed_const(40); - c = 3; - } else { - k1.full = rfixed_const(20); - c = 1; - } - } else { - k1.full = rfixed_const(40); - c = 3; - } - - temp_ff.full = rfixed_const(2); - mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); - temp_ff.full = rfixed_const(c); - mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); - temp_ff.full = rfixed_const(4); - mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); - mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); - mc_latency_mclk.full += k1.full; - - mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); - mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); - - /* - HW cursor time assuming worst case of full size colour cursor. - */ - temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); - temp_ff.full += trcd_ff.full; - if (temp_ff.full < tras_ff.full) - temp_ff.full = tras_ff.full; - cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); - - temp_ff.full = rfixed_const(cur_size); - cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); - /* - Find the total latency for the display data. - */ - disp_latency_overhead.full = rfixed_const(80); - disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); - mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; - mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; - - if (mc_latency_mclk.full > mc_latency_sclk.full) - disp_latency.full = mc_latency_mclk.full; - else - disp_latency.full = mc_latency_sclk.full; - - /* setup Max GRPH_STOP_REQ default value */ - if (ASIC_IS_RV100(rdev)) - max_stop_req = 0x5c; - else - max_stop_req = 0x7c; - - if (mode1) { - /* CRTC1 - Set GRPH_BUFFER_CNTL register using h/w defined optimal values. - GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] - */ - stop_req = mode1->hdisplay * pixel_bytes1 / 16; - - if (stop_req > max_stop_req) - stop_req = max_stop_req; - - /* - Find the drain rate of the display buffer. - */ - temp_ff.full = rfixed_const((16/pixel_bytes1)); - disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); - - /* - Find the critical point of the display buffer. - */ - crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); - crit_point_ff.full += rfixed_const_half(0); - - critical_point = rfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point = 0; - } - - /* - The critical point should never be above max_stop_req-4. Setting - GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 
- */ - if (max_stop_req - critical_point < 4) - critical_point = 0; - - if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ - critical_point = 0x10; - } - - temp = RREG32(RADEON_GRPH_BUFFER_CNTL); - temp &= ~(RADEON_GRPH_STOP_REQ_MASK); - temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - temp &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - temp |= RADEON_GRPH_BUFFER_SIZE; - temp &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); - /* - Write the result into the register. - */ - WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - -#if 0 - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { - /* attempt to program RS400 disp regs correctly ??? */ - temp = RREG32(RS400_DISP1_REG_CNTL); - temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | - RS400_DISP1_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP1_REQ_CNTL1, (temp | - (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DMIF_MEM_CNTL1); - temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | - RS400_DISP1_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DMIF_MEM_CNTL1, (temp | - (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | - (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); - } -#endif - - DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", - /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ - (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); - } - - if (mode2) { - u32 grph2_cntl; - stop_req = mode2->hdisplay * pixel_bytes2 / 16; - - if (stop_req > max_stop_req) - stop_req = max_stop_req; - - /* - Find the drain rate of the display buffer. 
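The GRPH_BUFFER_CNTL update above is a read-modify-write: clear the stop/start/critical-point fields, then insert the freshly computed values. A sketch with made-up field positions; the real masks live in radeon_reg.h and are not quoted here.

```c
#include <stdint.h>
#include <stdio.h>

/* assumed layout, for illustration only */
#define STOP_REQ_SHIFT    0
#define STOP_REQ_MASK     (0x7fu << STOP_REQ_SHIFT)
#define START_REQ_SHIFT   8
#define START_REQ_MASK    (0x7fu << START_REQ_SHIFT)
#define CRIT_POINT_SHIFT  16
#define CRIT_POINT_MASK   (0xffu << CRIT_POINT_SHIFT)

static uint32_t update_grph_cntl(uint32_t reg, unsigned stop_req,
                                 unsigned critical_point)
{
        reg &= ~(STOP_REQ_MASK | START_REQ_MASK | CRIT_POINT_MASK);
        reg |= stop_req << STOP_REQ_SHIFT;
        reg |= stop_req << START_REQ_SHIFT;     /* start level mirrors stop level in this sketch */
        reg |= critical_point << CRIT_POINT_SHIFT;
        return reg;
}

int main(void)
{
        printf("0x%08x\n", update_grph_cntl(0xdeadbeef, 0x5c, 0x10));
        return 0;
}
```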
- */ - temp_ff.full = rfixed_const((16/pixel_bytes2)); - disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); - - grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); - grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); - grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; - grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); - - if ((rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) - critical_point2 = 0; - else { - temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; - temp_ff.full = rfixed_const(temp); - temp_ff.full = rfixed_mul(mclk_ff, temp_ff); - if (sclk_ff.full < temp_ff.full) - temp_ff.full = sclk_ff.full; - - read_return_rate.full = temp_ff.full; - - if (mode1) { - temp_ff.full = read_return_rate.full - disp_drain_rate.full; - time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); - } else { - time_disp1_drop_priority.full = 0; - } - crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; - crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); - crit_point_ff.full += rfixed_const_half(0); - - critical_point2 = rfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point2 = 0; - } - - if (max_stop_req - critical_point2 < 4) - critical_point2 = 0; - - } - - if (critical_point2 == 0 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0 */ - critical_point2 = 0x10; - } - - WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { -#if 0 - /* attempt to program RS400 disp2 regs correctly ??? 
*/ - temp = RREG32(RS400_DISP2_REQ_CNTL1); - temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | - RS400_DISP2_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP2_REQ_CNTL1, (temp | - (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DISP2_REQ_CNTL2); - temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | - RS400_DISP2_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DISP2_REQ_CNTL2, (temp | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); -#endif - WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); - WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); - WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); - WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); - } - - DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", - (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); - } -} diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2c2f42de1d4..9322675ef6d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -30,170 +30,6 @@ #include "atom.h" -static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - int xres = mode->hdisplay; - int yres = mode->vdisplay; - bool hscale = true, vscale = true; - int hsync_wid; - int vsync_wid; - int hsync_start; - uint32_t scale, inc; - uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; - uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; - struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; - - DRM_DEBUG("\n"); - - fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & - (RADEON_VERT_STRETCH_RESERVED | - RADEON_VERT_AUTO_RATIO_INC); - fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & - (RADEON_HORZ_FP_LOOP_STRETCH | - RADEON_HORZ_AUTO_RATIO_INC); - - crtc_more_cntl = 0; - if ((rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) { - /* This is to workaround the asic bug for RMX, some versions - of BIOS dosen't have this register initialized correctly. */ - crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; - } - - - fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) - | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); - - hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; - if (!hsync_wid) - hsync_wid = 1; - hsync_start = mode->crtc_hsync_start - 8; - - fp_h_sync_strt_wid = ((hsync_start & 0x1fff) - | ((hsync_wid & 0x3f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NHSYNC) - ? RADEON_CRTC_H_SYNC_POL - : 0)); - - fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) - | ((mode->crtc_vdisplay - 1) << 16)); - - vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; - if (!vsync_wid) - vsync_wid = 1; - - fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) - | ((vsync_wid & 0x1f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NVSYNC) - ? 
RADEON_CRTC_V_SYNC_POL - : 0)); - - fp_horz_vert_active = 0; - - if (native_mode->panel_xres == 0 || - native_mode->panel_yres == 0) { - hscale = false; - vscale = false; - } else { - if (xres > native_mode->panel_xres) - xres = native_mode->panel_xres; - if (yres > native_mode->panel_yres) - yres = native_mode->panel_yres; - - if (xres == native_mode->panel_xres) - hscale = false; - if (yres == native_mode->panel_yres) - vscale = false; - } - - if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type != RMX_CENTER) { - if (!hscale) - fp_horz_stretch |= ((xres/8-1) << 16); - else { - inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; - scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) - / native_mode->panel_xres + 1; - fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | - RADEON_HORZ_STRETCH_BLEND | - RADEON_HORZ_STRETCH_ENABLE | - ((native_mode->panel_xres/8-1) << 16)); - } - - if (!vscale) - fp_vert_stretch |= ((yres-1) << 12); - else { - inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; - scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) - / native_mode->panel_yres + 1; - fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | - RADEON_VERT_STRETCH_ENABLE | - RADEON_VERT_STRETCH_BLEND | - ((native_mode->panel_yres-1) << 12)); - } - } else if (radeon_encoder->rmx_type == RMX_CENTER) { - int blank_width; - - fp_horz_stretch |= ((xres/8-1) << 16); - fp_vert_stretch |= ((yres-1) << 12); - - crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | - RADEON_CRTC_AUTO_VERT_CENTER_EN); - - blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; - if (blank_width > 110) - blank_width = 110; - - fp_crtc_h_total_disp = (((blank_width) & 0x3ff) - | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); - - hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; - if (!hsync_wid) - hsync_wid = 1; - - fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) - | ((hsync_wid & 0x3f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NHSYNC) - ? RADEON_CRTC_H_SYNC_POL - : 0)); - - fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) - | ((mode->crtc_vdisplay - 1) << 16)); - - vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; - if (!vsync_wid) - vsync_wid = 1; - - fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) - | ((vsync_wid & 0x1f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NVSYNC) - ? 
RADEON_CRTC_V_SYNC_POL - : 0))); - - fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | - (((native_mode->panel_xres / 8) & 0x1ff) << 16)); - } - } else { - fp_horz_stretch |= ((xres/8-1) << 16); - fp_vert_stretch |= ((yres-1) << 12); - } - - WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); - WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); - WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); - WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); - WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); - WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); - WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); - WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); - -} - static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; @@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; } else lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; @@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - radeon_encoder->flags &= ~RADEON_USE_RMX; - if (radeon_encoder->rmx_type != RMX_OFF) radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); @@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & @@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; if (rdev->family == CHIP_RV280) { @@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; else fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; @@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_external_tmds_setup(encoder, ATOM_ENABLE); @@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; else fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; @@ -1014,9 +836,6 @@ static void 
radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || @@ -1243,9 +1062,11 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t radeon_encoder->encoder_id = encoder_id; radeon_encoder->devices = supported_device; + radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: + encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); if (rdev->is_atom_bios) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9173b687462..3b09a1f2d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -36,6 +36,9 @@ #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> +#include "radeon_fixed.h" + +struct radeon_device; #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) @@ -124,6 +127,7 @@ struct radeon_tmds_pll { #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) +#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) struct radeon_pll { uint16_t reference_freq; @@ -170,6 +174,18 @@ struct radeon_mode_info { struct atom_context *atom_context; enum radeon_connector_table connector_table; bool mode_config_initialized; + struct radeon_crtc *crtcs[2]; +}; + +struct radeon_native_mode { + /* preferred mode */ + uint32_t panel_xres, panel_yres; + uint32_t hoverplus, hsync_width; + uint32_t hblank; + uint32_t voverplus, vsync_width; + uint32_t vblank; + uint32_t dotclock; + uint32_t flags; }; struct radeon_crtc { @@ -185,19 +201,13 @@ struct radeon_crtc { uint64_t cursor_addr; int cursor_width; int cursor_height; -}; - -#define RADEON_USE_RMX 1 - -struct radeon_native_mode { - /* preferred mode */ - uint32_t panel_xres, panel_yres; - uint32_t hoverplus, hsync_width; - uint32_t hblank; - uint32_t voverplus, vsync_width; - uint32_t vblank; - uint32_t dotclock; - uint32_t flags; + uint32_t legacy_display_base_addr; + uint32_t legacy_cursor_offset; + enum radeon_rmx_type rmx_type; + uint32_t devices; + fixed20_12 vsc; + fixed20_12 hsc; + struct radeon_native_mode native_mode; }; struct radeon_encoder_primary_dac { @@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_combios_asic_init(struct drm_device *dev); extern int radeon_static_clocks_init(struct drm_device *dev); -void radeon_init_disp_bw_legacy(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2); -void radeon_init_disp_bw_avivo(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2); -void radeon_init_disp_bandwidth(struct drm_device *dev); +bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void atom_rv515_force_tv_scaler(struct radeon_device *rdev); #endif diff --git 
a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 983e8df5e00..b85fb83d7ae 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -44,6 +44,9 @@ struct radeon_object { uint64_t gpu_addr; void *kptr; bool is_iomem; + uint32_t tiling_flags; + uint32_t pitch; + int surface_reg; }; int radeon_ttm_init(struct radeon_device *rdev); @@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) robj = container_of(tobj, struct radeon_object, tobj); list_del_init(&robj->list); + radeon_object_clear_surface_reg(robj); kfree(robj); } @@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) { uint32_t flags = 0; if (domain & RADEON_GEM_DOMAIN_VRAM) { - flags |= TTM_PL_FLAG_VRAM; + flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_GTT) { - flags |= TTM_PL_FLAG_TT; + flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; } if (domain & RADEON_GEM_DOMAIN_CPU) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } if (!flags) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } return flags; } @@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, } robj->rdev = rdev; robj->gobj = gobj; + robj->surface_reg = -1; INIT_LIST_HEAD(&robj->list); flags = radeon_object_flags_from_domain(domain); @@ -223,7 +228,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, { uint32_t flags; uint32_t tmp; - void *fbptr; int r; flags = radeon_object_flags_from_domain(domain); @@ -242,10 +246,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); return r; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } tmp = robj->tobj.mem.placement; ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; @@ -261,23 +261,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to pin object.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } return r; } void radeon_object_unpin(struct radeon_object *robj) { uint32_t flags; - void *fbptr; int r; spin_lock(&robj->tobj.lock); @@ -297,10 +286,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); return; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } flags = robj->tobj.mem.placement; robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; r = ttm_buffer_object_validate(&robj->tobj, @@ -310,16 +295,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to unpin buffer.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } } int 
radeon_object_wait(struct radeon_object *robj) @@ -334,7 +309,26 @@ int radeon_object_wait(struct radeon_object *robj) } spin_lock(&robj->tobj.lock); if (robj->tobj.sync_obj) { - r = ttm_bo_wait(&robj->tobj, true, false, false); + r = ttm_bo_wait(&robj->tobj, true, true, false); + } + spin_unlock(&robj->tobj.lock); + radeon_object_unreserve(robj); + return r; +} + +int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) +{ + int r = 0; + + r = radeon_object_reserve(robj, true); + if (unlikely(r != 0)) { + DRM_ERROR("radeon: failed to reserve object for waiting.\n"); + return r; + } + spin_lock(&robj->tobj.lock); + *cur_placement = robj->tobj.mem.mem_type; + if (robj->tobj.sync_obj) { + r = ttm_bo_wait(&robj->tobj, true, true, true); } spin_unlock(&robj->tobj.lock); radeon_object_unreserve(robj); @@ -433,7 +427,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) struct radeon_object *robj; struct radeon_fence *old_fence = NULL; struct list_head *i; - uint32_t flags; int r; r = radeon_object_list_reserve(head); @@ -444,27 +437,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence) list_for_each(i, head) { lobj = list_entry(i, struct radeon_object_list, list); robj = lobj->robj; - if (lobj->wdomain) { - flags = radeon_object_flags_from_domain(lobj->wdomain); - flags |= TTM_PL_FLAG_TT; - } else { - flags = radeon_object_flags_from_domain(lobj->rdomain); - flags |= TTM_PL_FLAG_TT; - flags |= TTM_PL_FLAG_VRAM; - } if (!robj->pin_count) { - robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; + if (lobj->wdomain) { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->wdomain); + } else { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->rdomain); + } r = ttm_buffer_object_validate(&robj->tobj, robj->tobj.proposed_placement, true, false); if (unlikely(r)) { - radeon_object_list_unreserve(head); DRM_ERROR("radeon: failed to validate.\n"); return r; } radeon_object_gpu_addr(robj); } lobj->gpu_offset = robj->gpu_addr; + lobj->tiling_flags = robj->tiling_flags; if (fence) { old_fence = (struct radeon_fence *)robj->tobj.sync_obj; robj->tobj.sync_obj = radeon_fence_ref(fence); @@ -509,3 +500,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) { return robj->tobj.num_pages << PAGE_SHIFT; } + +int radeon_object_get_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + struct radeon_object *old_object; + int steal; + int i; + + if (!robj->tiling_flags) + return 0; + + if (robj->surface_reg >= 0) { + reg = &rdev->surface_regs[robj->surface_reg]; + i = robj->surface_reg; + goto out; + } + + steal = -1; + for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { + + reg = &rdev->surface_regs[i]; + if (!reg->robj) + break; + + old_object = reg->robj; + if (old_object->pin_count == 0) + steal = i; + } + + /* if we are all out */ + if (i == RADEON_GEM_MAX_SURFACES) { + if (steal == -1) + return -ENOMEM; + /* find someone with a surface reg and nuke their BO */ + reg = &rdev->surface_regs[steal]; + old_object = reg->robj; + /* blow away the mapping */ + DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); + ttm_bo_unmap_virtual(&old_object->tobj); + old_object->surface_reg = -1; + i = steal; + } + + robj->surface_reg = i; + reg->robj = robj; + +out: + radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, + robj->tobj.mem.mm_node->start << PAGE_SHIFT, + robj->tobj.num_pages << PAGE_SHIFT); + 
return 0; +} + +void radeon_object_clear_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + + if (robj->surface_reg == -1) + return; + + reg = &rdev->surface_regs[robj->surface_reg]; + radeon_clear_surface_reg(rdev, robj->surface_reg); + + reg->robj = NULL; + robj->surface_reg = -1; +} + +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch) +{ + robj->tiling_flags = tiling_flags; + robj->pitch = pitch; +} + +void radeon_object_get_tiling_flags(struct radeon_object *robj, + uint32_t *tiling_flags, + uint32_t *pitch) +{ + if (tiling_flags) + *tiling_flags = robj->tiling_flags; + if (pitch) + *pitch = robj->pitch; +} + +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop) +{ + if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) + return 0; + + if (force_drop) { + radeon_object_clear_surface_reg(robj); + return 0; + } + + if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { + if (!has_moved) + return 0; + + if (robj->surface_reg >= 0) + radeon_object_clear_surface_reg(robj); + return 0; + } + + if ((robj->surface_reg >= 0) && !has_moved) + return 0; + + return radeon_object_get_surface_reg(robj); +} + +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 1); +} + +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 0); +} diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index e1b61857446..4df43f62c67 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -982,12 +982,15 @@ # define RS400_TMDS2_PLLRST (1 << 1) #define RADEON_GEN_INT_CNTL 0x0040 +# define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 -# define RADEON_VSYNC_INT_AK (1 << 2) -# define RADEON_VSYNC_INT (1 << 2) -# define RADEON_VSYNC2_INT_AK (1 << 6) -# define RADEON_VSYNC2_INT (1 << 6) +# define AVIVO_DISPLAY_INT_STATUS (1 << 0) +# define RADEON_CRTC_VBLANK_STAT (1 << 0) +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_CRTC2_VBLANK_STAT (1 << 9) +# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_SW_INT_FIRE (1 << 26) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) @@ -2334,6 +2337,9 @@ # define RADEON_RE_WIDTH_SHIFT 0 # define RADEON_RE_HEIGHT_SHIFT 16 +#define RADEON_RB3D_ZPASS_DATA 0x3290 +#define RADEON_RB3D_ZPASS_ADDR 0x3294 + #define RADEON_SE_CNTL 0x1c4c # define RADEON_FFACE_CULL_CW (0 << 0) # define RADEON_FFACE_CULL_CCW (1 << 0) @@ -3568,4 +3574,6 @@ #define RADEON_SCRATCH_REG4 0x15f0 #define RADEON_SCRATCH_REG5 0x15f4 +#define RV530_GB_PIPE_SELECT2 0x4124 + #endif diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a853261d188..60d159308b8 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) } } -static void radeon_ib_cpu_flush(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - unsigned long tmp; - unsigned i; - - /* To force CPU cache flush ugly but seems reliable */ 
- for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { - tmp = readl(&ib->ptr[i]); - } -} - int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) { int r = 0; mutex_lock(&rdev->ib_pool.mutex); radeon_ib_align(rdev, ib); - radeon_ib_cpu_flush(rdev, ib); if (!ib->length_dw || !rdev->cp.ready) { /* TODO: Nothings in the ib we should report. */ mutex_unlock(&rdev->ib_pool.mutex); DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); return -EINVAL; } - /* 64 dwords should be enought for fence too */ + /* 64 dwords should be enough for fence too */ r = radeon_ring_lock(rdev, 64); if (r) { DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h new file mode 100644 index 00000000000..63a773578f1 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_share.h @@ -0,0 +1,39 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RADEON_SHARE_H__ +#define __RADEON_SHARE_H__ + +void r100_vram_init_sizes(struct radeon_device *rdev); + +void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2); + +void rv515_bandwidth_avivo_update(struct radeon_device *rdev); + +#endif diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 46645f3e032..2882f40d5ec 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -3081,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil case RADEON_PARAM_NUM_GB_PIPES: value = dev_priv->num_gb_pipes; break; + case RADEON_PARAM_NUM_Z_PIPES: + value = dev_priv->num_z_pipes; + break; default: DRM_DEBUG("Invalid parameter %d\n", param->param); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c new file mode 100644 index 00000000000..03c33cf4e14 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -0,0 +1,209 @@ +/* + * Copyright 2009 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Michel Dänzer + */ +#include <drm/drmP.h> +#include <drm/radeon_drm.h> +#include "radeon_reg.h" +#include "radeon.h" + + +/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ +void radeon_test_moves(struct radeon_device *rdev) +{ + struct radeon_object *vram_obj = NULL; + struct radeon_object **gtt_obj = NULL; + struct radeon_fence *fence = NULL; + uint64_t gtt_addr, vram_addr; + unsigned i, n, size; + int r; + + size = 1024 * 1024; + + /* Number of tests = + * (Total GTT - IB pool - writeback page - ring buffer) / test size + */ + n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - + rdev->cp.ring_size) / size; + + gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); + if (!gtt_obj) { + DRM_ERROR("Failed to allocate %d pointers\n", n); + r = 1; + goto out_cleanup; + } + + r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, + false, &vram_obj); + if (r) { + DRM_ERROR("Failed to create VRAM object\n"); + goto out_cleanup; + } + + r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); + if (r) { + DRM_ERROR("Failed to pin VRAM object\n"); + goto out_cleanup; + } + + for (i = 0; i < n; i++) { + void *gtt_map, *vram_map; + void **gtt_start, **gtt_end; + void **vram_start, **vram_end; + + r = radeon_object_create(rdev, NULL, size, true, + RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); + if (r) { + DRM_ERROR("Failed to create GTT object %d\n", i); + goto out_cleanup; + } + + r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr); + if (r) { + DRM_ERROR("Failed to pin GTT object %d\n", i); + goto out_cleanup; + } + + r = radeon_object_kmap(gtt_obj[i], &gtt_map); + if (r) { + DRM_ERROR("Failed to map GTT object %d\n", i); + goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size; + gtt_start < gtt_end; + gtt_start++) + *gtt_start = gtt_start; + + radeon_object_kunmap(gtt_obj[i]); + + r = radeon_fence_create(rdev, &fence); + if (r) { + DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); + goto out_cleanup; + } + + r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); + if (r) { + DRM_ERROR("Failed GTT->VRAM copy %d\n", i); + goto out_cleanup; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); + goto out_cleanup; + } + + radeon_fence_unref(&fence); + + r = radeon_object_kmap(vram_obj, &vram_map); + if (r) { + DRM_ERROR("Failed to map 
VRAM object after copy %d\n", i); + goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size, + vram_start = vram_map, vram_end = vram_map + size; + vram_start < vram_end; + gtt_start++, vram_start++) { + if (*vram_start != gtt_start) { + DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " + "expected 0x%p (GTT map 0x%p-0x%p)\n", + i, *vram_start, gtt_start, gtt_map, + gtt_end); + radeon_object_kunmap(vram_obj); + goto out_cleanup; + } + *vram_start = vram_start; + } + + radeon_object_kunmap(vram_obj); + + r = radeon_fence_create(rdev, &fence); + if (r) { + DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); + goto out_cleanup; + } + + r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); + if (r) { + DRM_ERROR("Failed VRAM->GTT copy %d\n", i); + goto out_cleanup; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); + goto out_cleanup; + } + + radeon_fence_unref(&fence); + + r = radeon_object_kmap(gtt_obj[i], &gtt_map); + if (r) { + DRM_ERROR("Failed to map GTT object after copy %d\n", i); + goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size, + vram_start = vram_map, vram_end = vram_map + size; + gtt_start < gtt_end; + gtt_start++, vram_start++) { + if (*gtt_start != vram_start) { + DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " + "expected 0x%p (VRAM map 0x%p-0x%p)\n", + i, *gtt_start, vram_start, vram_map, + vram_end); + radeon_object_kunmap(gtt_obj[i]); + goto out_cleanup; + } + } + + radeon_object_kunmap(gtt_obj[i]); + + DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", + gtt_addr - rdev->mc.gtt_location); + } + +out_cleanup: + if (vram_obj) { + radeon_object_unpin(vram_obj); + radeon_object_unref(&vram_obj); + } + if (gtt_obj) { + for (i = 0; i < n; i++) { + if (gtt_obj[i]) { + radeon_object_unpin(gtt_obj[i]); + radeon_object_unref(&gtt_obj[i]); + } + } + kfree(gtt_obj); + } + if (fence) { + radeon_fence_unref(&fence); + } + if (r) { + printk(KERN_WARNING "Error while testing BO move.\n"); + } +} + diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f516..15c3531377e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (!rdev->cp.ready) { /* use memcpy */ DRM_ERROR("CP is not ready use memcpy.\n"); - return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + goto memcpy; } if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { - return radeon_move_vram_ram(bo, evict, interruptible, + r = radeon_move_vram_ram(bo, evict, interruptible, no_wait, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { - return radeon_move_ram_vram(bo, evict, interruptible, + r = radeon_move_ram_vram(bo, evict, interruptible, no_wait, new_mem); } else { r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); - if (unlikely(r)) { - return r; - } } + + if (r) { +memcpy: + r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return r; } @@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = { .sync_obj_flush = &radeon_sync_obj_flush, .sync_obj_unref = &radeon_sync_obj_unref, .sync_obj_ref = &radeon_sync_obj_ref, + .move_notify = &radeon_bo_move_notify, + .fault_reserve_notify = &radeon_bo_fault_reserve_notify, }; int radeon_ttm_init(struct radeon_device *rdev) @@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device 
*rdev) /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.mem_global_ref.object, - &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); + &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, + rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, - ((rdev->mc.aper_size) >> PAGE_SHIFT)); + ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; @@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev) return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", - rdev->mc.vram_size / (1024 * 1024)); + rdev->mc.real_vram_size / (1024 * 1024)); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT)); if (r) { diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index cc074b5a8f7..b29affd9c5d 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include "radeon_reg.h" #include "radeon.h" +#include "radeon_share.h" /* rs400,rs480 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev) WREG32(RADEON_BUS_CNTL, tmp); } /* Table should be in 32bits address space so ignore bits above. */ - tmp = rdev->gart.table_addr & 0xfffff000; + tmp = (u32)rdev->gart.table_addr & 0xfffff000; + tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; + WREG32_MC(RS480_GART_BASE, tmp); /* TODO: more tweaking here */ WREG32_MC(RS480_GART_FEATURE_ID, @@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev) int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { + uint32_t entry; + if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); + + entry = (lower_32_bits(addr) & PAGE_MASK) | + ((upper_32_bits(addr) & 0xff) << 4) | + 0xc; + entry = cpu_to_le32(entry); + rdev->gart.table.ram.ptr[i] = entry; return 0; } @@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev) rs400_gpu_init(rdev); rs400_gart_disable(rdev); - rdev->mc.gtt_location = rdev->mc.vram_size; + rdev->mc.gtt_location = rdev->mc.mc_vram_size; rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); - rdev->mc.vram_location = 0xFFFFFFFFUL; r = radeon_mc_setup(rdev); if (r) { return r; @@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); WREG32(RADEON_MC_FB_LOCATION, tmp); @@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev) */ void rs400_vram_info(struct radeon_device *rdev) { - uint32_t tom; - rs400_gart_adjust_size(rdev); /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; - /* read NB_TOM to get the amount of ram stolen for the GPU */ - tom = RREG32(RADEON_NB_TOM); - rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - - /* Could aper size report 0 ? 
*/ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index ab0c967553e..02fd11aad6a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(RS600_MC_FB_LOCATION, tmp); @@ -240,6 +240,88 @@ void rs600_mc_fini(struct radeon_device *rdev) /* + * Interrupts + */ +int rs600_irq_set(struct radeon_device *rdev) +{ + uint32_t tmp = 0; + uint32_t mode_int = 0; + + if (rdev->irq.sw_int) { + tmp |= RADEON_SW_INT_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0]) { + tmp |= AVIVO_DISPLAY_INT_STATUS; + mode_int |= AVIVO_D1MODE_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[1]) { + tmp |= AVIVO_DISPLAY_INT_STATUS; + mode_int |= AVIVO_D2MODE_INT_MASK; + } + WREG32(RADEON_GEN_INT_CNTL, tmp); + WREG32(AVIVO_DxMODE_INT_MASK, mode_int); + return 0; +} + +static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) +{ + uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); + uint32_t irq_mask = RADEON_SW_INT_TEST; + + if (irqs & AVIVO_DISPLAY_INT_STATUS) { + *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); + if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { + WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); + } + if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { + WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); + } + } else { + *r500_disp_int = 0; + } + + if (irqs) { + WREG32(RADEON_GEN_INT_STATUS, irqs); + } + return irqs & irq_mask; +} + +int rs600_irq_process(struct radeon_device *rdev) +{ + uint32_t status; + uint32_t r500_disp_int; + + status = rs600_irq_ack(rdev, &r500_disp_int); + if (!status && !r500_disp_int) { + return IRQ_NONE; + } + while (status || r500_disp_int) { + /* SW interrupt */ + if (status & RADEON_SW_INT_TEST) { + radeon_fence_process(rdev); + } + /* Vertical blank interrupts */ + if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 0); + } + if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 1); + } + status = rs600_irq_ack(rdev, &r500_disp_int); + } + return IRQ_HANDLED; +} + +u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) +{ + if (crtc == 0) + return RREG32(AVIVO_D1CRTC_FRAME_COUNT); + else + return RREG32(AVIVO_D2CRTC_FRAME_COUNT); +} + + +/* * Global GPU functions */ void rs600_disable_vga(struct radeon_device *rdev) @@ -301,6 +383,11 @@ void rs600_vram_info(struct radeon_device *rdev) rdev->mc.vram_width = 128; } +void rs600_bandwidth_update(struct radeon_device *rdev) +{ + /* FIXME: implement, should this be like rs690 ? 
*/ +} + /* * Indirect registers accessor @@ -322,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) ((reg) & RS600_MC_ADDR_MASK)); WREG32(RS600_MC_DATA, v); } + +static const unsigned rs600_reg_safe_bm[219] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, + 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0x0000C100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, +}; + +int rs600_init(struct radeon_device *rdev) +{ + rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; + rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); + return 0; +} diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 79ba85042b5..879882533e4 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -28,6 +28,9 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "rs690r.h" +#include "atom.h" +#include "atom-bits.h" /* rs690,rs740 depends on : */ void 
r100_hdp_reset(struct radeon_device *rdev); @@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev) rs400_gart_disable(rdev); /* Setup GPU memory space */ - rdev->mc.gtt_location = rdev->mc.vram_size; + rdev->mc.gtt_location = rdev->mc.mc_vram_size; rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); rdev->mc.vram_location = 0xFFFFFFFFUL; @@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); @@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev) /* * VRAM info. */ +void rs690_pm_info(struct radeon_device *rdev) +{ + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + struct _ATOM_INTEGRATED_SYSTEM_INFO *info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; + void *ptr; + uint16_t data_offset; + uint8_t frev, crev; + fixed20_12 tmp; + + atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, + &frev, &crev, &data_offset); + ptr = rdev->mode_info.atom_context->bios + data_offset; + info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; + info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; + /* Get various system informations from bios */ + switch (crev) { + case 1: + tmp.full = rfixed_const(100); + rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); + rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); + rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); + rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); + rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); + break; + case 2: + tmp.full = rfixed_const(100); + rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); + rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); + rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); + rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); + rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); + rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); + rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); + break; + default: + tmp.full = rfixed_const(100); + /* We assume the slower possible clock ie worst case */ + /* DDR 333Mhz */ + rdev->pm.igp_sideport_mclk.full = rfixed_const(333); + /* FIXME: system clock ? 
*/ + rdev->pm.igp_system_mclk.full = rfixed_const(100); + rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); + rdev->pm.igp_ht_link_clk.full = rfixed_const(200); + rdev->pm.igp_ht_link_width.full = rfixed_const(8); + DRM_ERROR("No integrated system info for your GPU, using safe default\n"); + break; + } + /* Compute various bandwidth */ + /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ + tmp.full = rfixed_const(4); + rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); + /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 + * = ht_clk * ht_width / 5 + */ + tmp.full = rfixed_const(5); + rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, + rdev->pm.igp_ht_link_width); + rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); + if (tmp.full < rdev->pm.max_bandwidth.full) { + /* HT link is a limiting factor */ + rdev->pm.max_bandwidth.full = tmp.full; + } + /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 + * = (sideport_clk * 14) / 10 + */ + tmp.full = rfixed_const(14); + rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); + tmp.full = rfixed_const(10); + rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); +} + void rs690_vram_info(struct radeon_device *rdev) { uint32_t tmp; + fixed20_12 a; rs400_gart_adjust_size(rdev); /* DDR for all card after R300 & IGP */ @@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev) } else { rdev->mc.vram_width = 64; } - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rs690_pm_info(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + a.full = rfixed_const(16); + /* core_bandwidth = sclk(Mhz) * 16 */ + rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); +} + +void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2) +{ + u32 tmp; + + /* + * Line Buffer Setup + * There is a single line buffer shared by both display controllers. + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * the display controllers. The paritioning can either be done + * manually or via one of four preset allocations specified in bits 1:0: + * 0 - line buffer is divided in half and shared between crtc + * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 + * 2 - D1 gets the whole buffer + * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 + * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual + * allocation mode. In manual allocation mode, D1 always starts at 0, + * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 
+ */ + tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; + tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; + /* auto */ + if (mode1 && mode2) { + if (mode1->hdisplay > mode2->hdisplay) { + if (mode1->hdisplay > 2560) + tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; + else + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else if (mode2->hdisplay > mode1->hdisplay) { + if (mode2->hdisplay > 2560) + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + else + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else + tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else if (mode1) { + tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; + } else if (mode2) { + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + } + WREG32(DC_LB_MEMORY_SPLIT, tmp); +} + +struct rs690_watermark { + u32 lb_request_fifo_depth; + fixed20_12 num_line_pair; + fixed20_12 estimated_width; + fixed20_12 worst_case_latency; + fixed20_12 consumption_rate; + fixed20_12 active_time; + fixed20_12 dbpp; + fixed20_12 priority_mark_max; + fixed20_12 priority_mark; + fixed20_12 sclk; +}; + +void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + struct radeon_crtc *crtc, + struct rs690_watermark *wm) +{ + struct drm_display_mode *mode = &crtc->base.mode; + fixed20_12 a, b, c; + fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; + fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; + /* FIXME: detect IGP with sideport memory, i don't think there is any + * such product available + */ + bool sideport = false; + + if (!crtc->base.enabled) { + /* FIXME: wouldn't it better to set priority mark to maximum */ + wm->lb_request_fifo_depth = 4; + return; + } + + if (crtc->vsc.full > rfixed_const(2)) + wm->num_line_pair.full = rfixed_const(2); + else + wm->num_line_pair.full = rfixed_const(1); + + b.full = rfixed_const(mode->crtc_hdisplay); + c.full = rfixed_const(256); + a.full = rfixed_mul(wm->num_line_pair, b); + request_fifo_depth.full = rfixed_div(a, c); + if (a.full < rfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { + wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate + * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) + * vtaps = number of vertical taps, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ + a.full = rfixed_const(mode->clock); + b.full = rfixed_const(1000); + a.full = rfixed_div(a, b); + pclk.full = rfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { + b.full = rfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; + b.full = rfixed_mul(b, crtc->hsc); + c.full = rfixed_const(2); + b.full = rfixed_div(b, c); + consumption_time.full = rfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } + a.full = rfixed_const(1); + wm->consumption_rate.full = rfixed_div(a, consumption_time); + + + /* Determine line time + * LineTime = total time for one line of displayhtotal + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = rfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = rfixed_mul(line_time, b); + wm->active_time.full = 
rfixed_div(wm->active_time, a); + + /* Maximun bandwidth is the minimun bandwidth of all component */ + rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; + if (sideport) { + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && + rdev->pm.sideport_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; + read_delay_latency.full = rfixed_const(370 * 800 * 1000); + read_delay_latency.full = rfixed_div(read_delay_latency, + rdev->pm.igp_sideport_mclk); + } else { + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && + rdev->pm.k8_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; + if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && + rdev->pm.ht_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; + read_delay_latency.full = rfixed_const(5000); + } + + /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ + a.full = rfixed_const(16); + rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); + a.full = rfixed_const(1000); + rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); + /* Determine chunk time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(ns) + */ + a.full = rfixed_const(256 * 13); + chunk_time.full = rfixed_mul(rdev->pm.sclk, a); + a.full = rfixed_const(10); + chunk_time.full = rfixed_div(chunk_time, a); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) + * WorstCaseLatency = worst case time from urgent to when the MC starts + * to return data + * READ_DELAY_IDLE_MAX = constant of 1us + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ + if (rfixed_trunc(wm->num_line_pair) > 1) { + a.full = rfixed_const(3); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { + a.full = rfixed_const(2); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } + + /* Determine the tolerable latency + * TolerableLatency = Any given request has only 1 line time + * for the data to be returned + * LBRequestFifoDepth = Number of chunk requests the LB can + * put into the request FIFO for a display + * LineTime = total time for one line of display + * ChunkTime = the time it takes the DCP to send one chunk + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ + if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { + tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; + tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ + wm->dbpp.full = rfixed_const(4 * 8); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ + a.full = rfixed_const(16); + wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + + /* Determine estimated width */ + estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; + estimated_width.full = rfixed_div(estimated_width, consumption_time); + if 
(rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = rfixed_const(10); + } else { + a.full = rfixed_const(16); + wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } } +void rs690_bandwidth_update(struct radeon_device *rdev) +{ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + struct rs690_watermark wm0; + struct rs690_watermark wm1; + u32 tmp; + fixed20_12 priority_mark02, priority_mark12, fill_rate; + fixed20_12 a, b; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + /* + * Set display0/1 priority up in the memory controller for + * modes if the user specifies HIGH for displaypriority + * option. + */ + if (rdev->disp_priority == 2) { + tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); + tmp &= ~MC_DISP1R_INIT_LAT_MASK; + tmp &= ~MC_DISP0R_INIT_LAT_MASK; + if (mode1) + tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + if (mode0) + tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); + WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); + } + rs690_line_buffer_adjust(rdev, mode0, mode1); + + if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) + WREG32(DCP_CONTROL, 0); + if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + WREG32(DCP_CONTROL, 2); + + rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); + rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); + + tmp = (wm0.lb_request_fifo_depth - 1); + tmp |= (wm1.lb_request_fifo_depth - 1) << 16; + WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + if (rfixed_trunc(wm1.dbpp) > 64) + b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > 
priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } else if (mode0) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + } else { + if (rfixed_trunc(wm1.dbpp) > 64) + a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + a.full = wm1.num_line_pair.full; + fill_rate.full = rfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } +} /* * Indirect registers accessor @@ -179,3 +652,4 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) WREG32(RS690_MC_DATA, v); WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); } + diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 00000000000..c0d9faa2175 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690r.h @@ -0,0 +1,99 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef RS690R_H +#define RS690R_H + +/* RS690/RS740 registers */ +#define MC_INDEX 0x0078 +# define MC_INDEX_MASK 0x1FF +# define MC_INDEX_WR_EN (1 << 9) +# define MC_INDEX_WR_ACK 0x7F +#define MC_DATA 0x007C +#define HDP_FB_LOCATION 0x0134 +#define DC_LB_MEMORY_SPLIT 0x6520 +#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 +#define DC_LB_MEMORY_SPLIT_SHIFT 0 +#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 +#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) +#define DC_LB_DISP1_END_ADR_SHIFT 4 +#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 +#define D1MODE_PRIORITY_A_CNT 0x6548 +#define MODE_PRIORITY_MARK_MASK 0x00007FFF +#define MODE_PRIORITY_OFF (1 << 16) +#define MODE_PRIORITY_ALWAYS_ON (1 << 20) +#define MODE_PRIORITY_FORCE_MASK (1 << 24) +#define D1MODE_PRIORITY_B_CNT 0x654C +#define LB_MAX_REQ_OUTSTANDING 0x6D58 +#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F +#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 +#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 +#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 +#define DCP_CONTROL 0x6C9C +#define D2MODE_PRIORITY_A_CNT 0x6D48 +#define D2MODE_PRIORITY_B_CNT 0x6D4C + +/* MC indirect registers */ +#define MC_STATUS_IDLE (1 << 0) +#define MC_MISC_CNTL 0x18 +#define DISABLE_GTW (1 << 1) +#define GART_INDEX_REG_EN (1 << 12) +#define BLOCK_GFX_D3_EN (1 << 14) +#define GART_FEATURE_ID 0x2B +#define HANG_EN (1 << 11) +#define TLB_ENABLE (1 << 18) +#define P2P_ENABLE (1 << 19) +#define GTW_LAC_EN (1 << 25) +#define LEVEL2_GART (0 << 30) +#define LEVEL1_GART (1 << 30) +#define PDC_EN (1 << 31) +#define GART_BASE 0x2C +#define GART_CACHE_CNTRL 0x2E +# define GART_CACHE_INVALIDATE (1 << 0) +#define MC_STATUS 0x90 +#define MCCFG_FB_LOCATION 0x100 +#define MC_FB_START_MASK 0x0000FFFF +#define MC_FB_START_SHIFT 0 +#define MC_FB_TOP_MASK 0xFFFF0000 +#define MC_FB_TOP_SHIFT 16 +#define MCCFG_AGP_LOCATION 0x101 +#define MC_AGP_START_MASK 0x0000FFFF +#define MC_AGP_START_SHIFT 0 +#define MC_AGP_TOP_MASK 0xFFFF0000 +#define MC_AGP_TOP_SHIFT 16 +#define MCCFG_AGP_BASE 0x102 +#define MCCFG_AGP_BASE_2 0x103 +#define MC_INIT_MISC_LAT_TIMER 0x104 +#define MC_DISP0R_INIT_LAT_SHIFT 8 +#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 +#define MC_DISP1R_INIT_LAT_SHIFT 12 +#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 + +#endif diff --git 
a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffea37b1b3e..0566fb67e46 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -27,8 +27,9 @@ */ #include <linux/seq_file.h> #include "drmP.h" -#include "radeon_reg.h" +#include "rv515r.h" #include "radeon.h" +#include "radeon_share.h" /* rv515 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); WREG32(0x134, tmp); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; - tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32_MC(RV515_MC_FB_LOCATION, tmp); - WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); + WREG32_MC(MC_FB_LOCATION, tmp); + WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); WREG32(0x310, rdev->mc.vram_location); if (rdev->flags & RADEON_IS_AGP) { tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); - tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); - WREG32_MC(RV515_MC_AGP_LOCATION, tmp); - WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); - WREG32_MC(RV515_MC_AGP_BASE_2, 0); + tmp = REG_SET(MC_AGP_TOP, tmp >> 16); + tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); + WREG32_MC(MC_AGP_LOCATION, tmp); + WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); + WREG32_MC(MC_AGP_BASE_2, 0); } else { - WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); - WREG32_MC(RV515_MC_AGP_BASE, 0); - WREG32_MC(RV515_MC_AGP_BASE_2, 0); + WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32_MC(MC_AGP_BASE, 0); + WREG32_MC(MC_AGP_BASE_2, 0); } return 0; } @@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev) */ void rv515_ring_start(struct radeon_device *rdev) { - unsigned gb_tile_config; int r; - /* Sub pixel 1/12 so we can have 4K rendering according to doc */ - gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; - switch (rdev->num_gb_pipes) { - case 2: - gb_tile_config |= R300_PIPE_COUNT_R300; - break; - case 3: - gb_tile_config |= R300_PIPE_COUNT_R420_3P; - break; - case 4: - gb_tile_config |= R300_PIPE_COUNT_R420; - break; - case 1: - default: - gb_tile_config |= R300_PIPE_COUNT_RV350; - break; - } - r = radeon_ring_lock(rdev, 64); if (r) { return; } - radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); - radeon_ring_write(rdev, - RADEON_ISYNC_ANY2D_IDLE3D | - RADEON_ISYNC_ANY3D_IDLE2D | - RADEON_ISYNC_WAIT_IDLEGUI | - RADEON_ISYNC_CPSCRATCH_IDLEGUI); - radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); - radeon_ring_write(rdev, gb_tile_config); - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); radeon_ring_write(rdev, - RADEON_WAIT_2D_IDLECLEAN | - RADEON_WAIT_3D_IDLECLEAN); + ISYNC_ANY2D_IDLE3D | + ISYNC_ANY3D_IDLE2D | + ISYNC_WAIT_IDLEGUI | + ISYNC_CPSCRATCH_IDLEGUI); + radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); + radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); 
radeon_ring_write(rdev, PACKET0(0x170C, 0)); radeon_ring_write(rdev, 1 << 31); - radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); + radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); + radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET0(0x42C8, 0)); radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); - radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); + radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); - radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); - radeon_ring_write(rdev, - RADEON_WAIT_2D_IDLECLEAN | - RADEON_WAIT_3D_IDLECLEAN); - radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); + radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); + radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); + radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); + radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); + radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); - radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); - radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); - radeon_ring_write(rdev, - ((6 << R300_MS_X0_SHIFT) | - (6 << R300_MS_Y0_SHIFT) | - (6 << R300_MS_X1_SHIFT) | - (6 << R300_MS_Y1_SHIFT) | - (6 << R300_MS_X2_SHIFT) | - (6 << R300_MS_Y2_SHIFT) | - (6 << R300_MSBD0_Y_SHIFT) | - (6 << R300_MSBD0_X_SHIFT))); - radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); - radeon_ring_write(rdev, - ((6 << R300_MS_X3_SHIFT) | - (6 << R300_MS_Y3_SHIFT) | - (6 << R300_MS_X4_SHIFT) | - (6 << R300_MS_Y4_SHIFT) | - (6 << R300_MS_X5_SHIFT) | - (6 << R300_MS_Y5_SHIFT) | - (6 << R300_MSBD1_SHIFT))); - radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); - radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); - radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); + radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); + radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); + radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); radeon_ring_write(rdev, - R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); - radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); + ((6 << MS_X0_SHIFT) | + (6 << MS_Y0_SHIFT) | + (6 << MS_X1_SHIFT) | + (6 << MS_Y1_SHIFT) | + (6 << MS_X2_SHIFT) | + (6 << MS_Y2_SHIFT) | + (6 << MSBD0_Y_SHIFT) | + (6 << MSBD0_X_SHIFT))); + radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); radeon_ring_write(rdev, - R300_GEOMETRY_ROUND_NEAREST | - R300_COLOR_ROUND_NEAREST); + ((6 << MS_X3_SHIFT) | + (6 << MS_Y3_SHIFT) | + (6 << MS_X4_SHIFT) | + (6 << MS_Y4_SHIFT) | + (6 << MS_X5_SHIFT) | + (6 << MS_Y5_SHIFT) | + (6 << MSBD1_SHIFT))); + radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); + radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); + radeon_ring_write(rdev, 
PACKET0(GA_POLY_MODE, 0)); + radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); + radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); + radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); radeon_ring_write(rdev, PACKET0(0x20C8, 0)); radeon_ring_write(rdev, 0); radeon_ring_unlock_commit(rdev); @@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32_MC(RV515_MC_STATUS); - if (tmp & RV515_MC_STATUS_IDLE) { + tmp = RREG32_MC(MC_STATUS); + if (tmp & MC_STATUS_IDLE) { return 0; } DRM_UDELAY(1); @@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev) reinit_cp = rdev->cp.ready; rdev->cp.ready = false; for (i = 0; i < rdev->usec_timeout; i++) { - WREG32(RADEON_CP_CSQ_MODE, 0); - WREG32(RADEON_CP_CSQ_CNTL, 0); - WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); - (void)RREG32(RADEON_RBBM_SOFT_RESET); + WREG32(CP_CSQ_MODE, 0); + WREG32(CP_CSQ_CNTL, 0); + WREG32(RBBM_SOFT_RESET, 0x32005); + (void)RREG32(RBBM_SOFT_RESET); udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); + WREG32(RBBM_SOFT_RESET, 0); /* Wait to prevent race in RBBM_STATUS */ mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (tmp & ((1 << 20) | (1 << 26))) { DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); /* GA still busy soft reset it */ WREG32(0x429C, 0x200); - WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); + WREG32(VAP_PVS_STATE_FLUSH_REG, 0); WREG32(0x43E0, 0); WREG32(0x43E4, 0); WREG32(0x24AC, 0); } /* Wait to prevent race in RBBM_STATUS */ mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (!(tmp & ((1 << 20) | (1 << 26)))) { break; } } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (!(tmp & ((1 << 20) | (1 << 26)))) { DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", tmp); @@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev) } DRM_UDELAY(1); } - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); DRM_ERROR("Failed to reset GA ! 
(RBBM_STATUS=0x%08X)\n", tmp); return -1; } @@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev) uint32_t status; /* reset order likely matter */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); /* reset HDP */ r100_hdp_reset(rdev); /* reset rb2d */ @@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev) rv515_ga_reset(rdev); } /* reset CP */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); if (status & (1 << 16)) { r100_cp_reset(rdev); } /* Check if GPU is idle */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); if (status & (1 << 31)) { DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); return -1; @@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) rdev->mc.vram_width = 128; rdev->mc.vram_is_ddr = true; - tmp = RREG32_MC(RV515_MC_CNTL); - tmp &= RV515_MEM_NUM_CHANNELS_MASK; + tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; switch (tmp) { case 0: rdev->mc.vram_width = 64; @@ -394,11 +366,17 @@ static void rv515_vram_get_type(struct radeon_device *rdev) void rv515_vram_info(struct radeon_device *rdev) { + fixed20_12 a; + rv515_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); } @@ -409,38 +387,19 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; - WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); - r = RREG32(R520_MC_IND_DATA); - WREG32(R520_MC_IND_INDEX, 0); + WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); + r = RREG32(MC_IND_DATA); + WREG32(MC_IND_INDEX, 0); return r; } void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); - WREG32(R520_MC_IND_DATA, (v)); - WREG32(R520_MC_IND_INDEX, 0); -} - -uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) -{ - uint32_t r; - - WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); - (void)RREG32(RADEON_PCIE_INDEX); - r = RREG32(RADEON_PCIE_DATA); - return r; -} - -void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); - (void)RREG32(RADEON_PCIE_INDEX); - WREG32(RADEON_PCIE_DATA, (v)); - (void)RREG32(RADEON_PCIE_DATA); + WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); + WREG32(MC_IND_DATA, (v)); + WREG32(MC_IND_INDEX, 0); } - /* * Debugfs info */ @@ -452,13 +411,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) struct radeon_device *rdev = dev->dev_private; uint32_t tmp; - tmp = RREG32(R400_GB_PIPE_SELECT); + tmp = RREG32(GB_PIPE_SELECT); seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); - tmp = RREG32(R500_SU_REG_DEST); + tmp = RREG32(SU_REG_DEST); seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); - tmp = RREG32(R300_GB_TILE_CONFIG); + tmp = RREG32(GB_TILE_CONFIG); seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); - tmp = RREG32(R300_DST_PIPE_CONFIG); + tmp = RREG32(DST_PIPE_CONFIG); seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); return 0; } @@ -509,9 +468,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) /* * Asic initialization */ -static const unsigned r500_reg_safe_bm[159] = { +static 
const unsigned r500_reg_safe_bm[219] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -549,14 +508,575 @@ static const unsigned r500_reg_safe_bm[159] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, + 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, }; - - int rv515_init(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); return 0; } + +void atom_rv515_force_tv_scaler(struct radeon_device *rdev) +{ + + WREG32(0x659C, 0x0); + WREG32(0x6594, 0x705); + WREG32(0x65A4, 0x10001); + WREG32(0x65D8, 0x0); + WREG32(0x65B0, 0x0); + WREG32(0x65C0, 0x0); + WREG32(0x65D4, 0x0); + WREG32(0x6578, 0x0); + WREG32(0x657C, 0x841880A8); + WREG32(0x6578, 0x1); + WREG32(0x657C, 0x84208680); + WREG32(0x6578, 0x2); + WREG32(0x657C, 0xBFF880B0); + WREG32(0x6578, 0x100); + WREG32(0x657C, 0x83D88088); + WREG32(0x6578, 0x101); + WREG32(0x657C, 0x84608680); + WREG32(0x6578, 0x102); + WREG32(0x657C, 0xBFF080D0); + WREG32(0x6578, 0x200); + WREG32(0x657C, 0x83988068); + WREG32(0x6578, 0x201); + WREG32(0x657C, 0x84A08680); + WREG32(0x6578, 0x202); + WREG32(0x657C, 0xBFF080F8); + WREG32(0x6578, 0x300); + WREG32(0x657C, 0x83588058); + WREG32(0x6578, 0x301); + WREG32(0x657C, 0x84E08660); + WREG32(0x6578, 0x302); + WREG32(0x657C, 0xBFF88120); + WREG32(0x6578, 0x400); + WREG32(0x657C, 0x83188040); + WREG32(0x6578, 0x401); + WREG32(0x657C, 0x85008660); + WREG32(0x6578, 0x402); + WREG32(0x657C, 0xBFF88150); + WREG32(0x6578, 0x500); + WREG32(0x657C, 0x82D88030); + WREG32(0x6578, 0x501); + WREG32(0x657C, 0x85408640); + WREG32(0x6578, 0x502); + WREG32(0x657C, 0xBFF88180); + WREG32(0x6578, 0x600); + WREG32(0x657C, 0x82A08018); + WREG32(0x6578, 0x601); + WREG32(0x657C, 0x85808620); + WREG32(0x6578, 0x602); + WREG32(0x657C, 0xBFF081B8); + WREG32(0x6578, 0x700); + WREG32(0x657C, 0x82608010); + WREG32(0x6578, 0x701); + WREG32(0x657C, 0x85A08600); + WREG32(0x6578, 0x702); + WREG32(0x657C, 0x800081F0); + WREG32(0x6578, 0x800); + WREG32(0x657C, 0x8228BFF8); + WREG32(0x6578, 0x801); + WREG32(0x657C, 0x85E085E0); + WREG32(0x6578, 0x802); + WREG32(0x657C, 0xBFF88228); + WREG32(0x6578, 0x10000); + WREG32(0x657C, 0x82A8BF00); + WREG32(0x6578, 0x10001); + WREG32(0x657C, 0x82A08CC0); + WREG32(0x6578, 0x10002); + WREG32(0x657C, 0x8008BEF8); + WREG32(0x6578, 0x10100); + WREG32(0x657C, 0x81F0BF28); + WREG32(0x6578, 0x10101); + WREG32(0x657C, 0x83608CA0); 
+ WREG32(0x6578, 0x10102); + WREG32(0x657C, 0x8018BED0); + WREG32(0x6578, 0x10200); + WREG32(0x657C, 0x8148BF38); + WREG32(0x6578, 0x10201); + WREG32(0x657C, 0x84408C80); + WREG32(0x6578, 0x10202); + WREG32(0x657C, 0x8008BEB8); + WREG32(0x6578, 0x10300); + WREG32(0x657C, 0x80B0BF78); + WREG32(0x6578, 0x10301); + WREG32(0x657C, 0x85008C20); + WREG32(0x6578, 0x10302); + WREG32(0x657C, 0x8020BEA0); + WREG32(0x6578, 0x10400); + WREG32(0x657C, 0x8028BF90); + WREG32(0x6578, 0x10401); + WREG32(0x657C, 0x85E08BC0); + WREG32(0x6578, 0x10402); + WREG32(0x657C, 0x8018BE90); + WREG32(0x6578, 0x10500); + WREG32(0x657C, 0xBFB8BFB0); + WREG32(0x6578, 0x10501); + WREG32(0x657C, 0x86C08B40); + WREG32(0x6578, 0x10502); + WREG32(0x657C, 0x8010BE90); + WREG32(0x6578, 0x10600); + WREG32(0x657C, 0xBF58BFC8); + WREG32(0x6578, 0x10601); + WREG32(0x657C, 0x87A08AA0); + WREG32(0x6578, 0x10602); + WREG32(0x657C, 0x8010BE98); + WREG32(0x6578, 0x10700); + WREG32(0x657C, 0xBF10BFF0); + WREG32(0x6578, 0x10701); + WREG32(0x657C, 0x886089E0); + WREG32(0x6578, 0x10702); + WREG32(0x657C, 0x8018BEB0); + WREG32(0x6578, 0x10800); + WREG32(0x657C, 0xBED8BFE8); + WREG32(0x6578, 0x10801); + WREG32(0x657C, 0x89408940); + WREG32(0x6578, 0x10802); + WREG32(0x657C, 0xBFE8BED8); + WREG32(0x6578, 0x20000); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20001); + WREG32(0x657C, 0x90008000); + WREG32(0x6578, 0x20002); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20003); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20100); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20101); + WREG32(0x657C, 0x8FE0BF70); + WREG32(0x6578, 0x20102); + WREG32(0x657C, 0xBFE880C0); + WREG32(0x6578, 0x20103); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20200); + WREG32(0x657C, 0x8018BFF8); + WREG32(0x6578, 0x20201); + WREG32(0x657C, 0x8F80BF08); + WREG32(0x6578, 0x20202); + WREG32(0x657C, 0xBFD081A0); + WREG32(0x6578, 0x20203); + WREG32(0x657C, 0xBFF88000); + WREG32(0x6578, 0x20300); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20301); + WREG32(0x657C, 0x8EE0BEC0); + WREG32(0x6578, 0x20302); + WREG32(0x657C, 0xBFB082A0); + WREG32(0x6578, 0x20303); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20400); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20401); + WREG32(0x657C, 0x8E00BEA0); + WREG32(0x6578, 0x20402); + WREG32(0x657C, 0xBF8883C0); + WREG32(0x6578, 0x20403); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20500); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20501); + WREG32(0x657C, 0x8D00BE90); + WREG32(0x6578, 0x20502); + WREG32(0x657C, 0xBF588500); + WREG32(0x6578, 0x20503); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20600); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20601); + WREG32(0x657C, 0x8BC0BE98); + WREG32(0x6578, 0x20602); + WREG32(0x657C, 0xBF308660); + WREG32(0x6578, 0x20603); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20700); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20701); + WREG32(0x657C, 0x8A80BEB0); + WREG32(0x6578, 0x20702); + WREG32(0x657C, 0xBF0087C0); + WREG32(0x6578, 0x20703); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20800); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20801); + WREG32(0x657C, 0x8920BED0); + WREG32(0x6578, 0x20802); + WREG32(0x657C, 0xBED08920); + WREG32(0x6578, 0x20803); + WREG32(0x657C, 0x80008010); + WREG32(0x6578, 0x30000); + WREG32(0x657C, 0x90008000); + WREG32(0x6578, 0x30001); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x30100); + WREG32(0x657C, 0x8FE0BF90); + WREG32(0x6578, 0x30101); + WREG32(0x657C, 0xBFF880A0); + WREG32(0x6578, 
0x30200); + WREG32(0x657C, 0x8F60BF40); + WREG32(0x6578, 0x30201); + WREG32(0x657C, 0xBFE88180); + WREG32(0x6578, 0x30300); + WREG32(0x657C, 0x8EC0BF00); + WREG32(0x6578, 0x30301); + WREG32(0x657C, 0xBFC88280); + WREG32(0x6578, 0x30400); + WREG32(0x657C, 0x8DE0BEE0); + WREG32(0x6578, 0x30401); + WREG32(0x657C, 0xBFA083A0); + WREG32(0x6578, 0x30500); + WREG32(0x657C, 0x8CE0BED0); + WREG32(0x6578, 0x30501); + WREG32(0x657C, 0xBF7884E0); + WREG32(0x6578, 0x30600); + WREG32(0x657C, 0x8BA0BED8); + WREG32(0x6578, 0x30601); + WREG32(0x657C, 0xBF508640); + WREG32(0x6578, 0x30700); + WREG32(0x657C, 0x8A60BEE8); + WREG32(0x6578, 0x30701); + WREG32(0x657C, 0xBF2087A0); + WREG32(0x6578, 0x30800); + WREG32(0x657C, 0x8900BF00); + WREG32(0x6578, 0x30801); + WREG32(0x657C, 0xBF008900); +} + +struct rv515_watermark { + u32 lb_request_fifo_depth; + fixed20_12 num_line_pair; + fixed20_12 estimated_width; + fixed20_12 worst_case_latency; + fixed20_12 consumption_rate; + fixed20_12 active_time; + fixed20_12 dbpp; + fixed20_12 priority_mark_max; + fixed20_12 priority_mark; + fixed20_12 sclk; +}; + +void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + struct radeon_crtc *crtc, + struct rv515_watermark *wm) +{ + struct drm_display_mode *mode = &crtc->base.mode; + fixed20_12 a, b, c; + fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; + fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; + + if (!crtc->base.enabled) { + /* FIXME: wouldn't it better to set priority mark to maximum */ + wm->lb_request_fifo_depth = 4; + return; + } + + if (crtc->vsc.full > rfixed_const(2)) + wm->num_line_pair.full = rfixed_const(2); + else + wm->num_line_pair.full = rfixed_const(1); + + b.full = rfixed_const(mode->crtc_hdisplay); + c.full = rfixed_const(256); + a.full = rfixed_mul(wm->num_line_pair, b); + request_fifo_depth.full = rfixed_div(a, c); + if (a.full < rfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { + wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate + * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) + * vtaps = number of vertical taps, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ + a.full = rfixed_const(mode->clock); + b.full = rfixed_const(1000); + a.full = rfixed_div(a, b); + pclk.full = rfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { + b.full = rfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; + b.full = rfixed_mul(b, crtc->hsc); + c.full = rfixed_const(2); + b.full = rfixed_div(b, c); + consumption_time.full = rfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } + a.full = rfixed_const(1); + wm->consumption_rate.full = rfixed_div(a, consumption_time); + + + /* Determine line time + * LineTime = total time for one line of displayhtotal + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = rfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = rfixed_mul(line_time, b); + wm->active_time.full = rfixed_div(wm->active_time, a); + + /* Determine chunk 
time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(Mhz) + */ + a.full = rfixed_const(600 * 1000); + chunk_time.full = rfixed_div(a, rdev->pm.sclk); + read_delay_latency.full = rfixed_const(1000); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) + * WorstCaseLatency = worst case time from urgent to when the MC starts + * to return data + * READ_DELAY_IDLE_MAX = constant of 1us + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ + if (rfixed_trunc(wm->num_line_pair) > 1) { + a.full = rfixed_const(3); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { + wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; + } + + /* Determine the tolerable latency + * TolerableLatency = Any given request has only 1 line time + * for the data to be returned + * LBRequestFifoDepth = Number of chunk requests the LB can + * put into the request FIFO for a display + * LineTime = total time for one line of display + * ChunkTime = the time it takes the DCP to send one chunk + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ + if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { + tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; + tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ + wm->dbpp.full = rfixed_const(2 * 16); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ + a.full = rfixed_const(16); + wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + + /* Determine estimated width */ + estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; + estimated_width.full = rfixed_div(estimated_width, consumption_time); + if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = rfixed_const(10); + } else { + a.full = rfixed_const(16); + wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } +} + +void rv515_bandwidth_avivo_update(struct radeon_device *rdev) +{ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + struct rv515_watermark wm0; + struct rv515_watermark wm1; + u32 tmp; + fixed20_12 priority_mark02, priority_mark12, fill_rate; + fixed20_12 a, b; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + rs690_line_buffer_adjust(rdev, mode0, mode1); + + rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); + rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); + + tmp = wm0.lb_request_fifo_depth; + tmp |= wm1.lb_request_fifo_depth << 16; + WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_div(wm0.dbpp, 
wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + if (rfixed_trunc(wm1.dbpp) > 64) + b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } else if (mode0) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16); + priority_mark02.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + } else { + if (rfixed_trunc(wm1.dbpp) > 64) + a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + else + a.full = wm1.num_line_pair.full; + fill_rate.full = rfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, 
wm1.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } +} + +void rv515_bandwidth_update(struct radeon_device *rdev) +{ + uint32_t tmp; + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + /* + * Set display0/1 priority up in the memory controller for + * modes if the user specifies HIGH for displaypriority + * option. + */ + if (rdev->disp_priority == 2) { + tmp = RREG32_MC(MC_MISC_LAT_TIMER); + tmp &= ~MC_DISP1R_INIT_LAT_MASK; + tmp &= ~MC_DISP0R_INIT_LAT_MASK; + if (mode1) + tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + if (mode0) + tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); + WREG32_MC(MC_MISC_LAT_TIMER, tmp); + } + rv515_bandwidth_avivo_update(rdev); +} diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h new file mode 100644 index 00000000000..f3cf8403990 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv515r.h @@ -0,0 +1,170 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef RV515R_H +#define RV515R_H + +/* RV515 registers */ +#define PCIE_INDEX 0x0030 +#define PCIE_DATA 0x0034 +#define MC_IND_INDEX 0x0070 +#define MC_IND_WR_EN (1 << 24) +#define MC_IND_DATA 0x0074 +#define RBBM_SOFT_RESET 0x00F0 +#define CONFIG_MEMSIZE 0x00F8 +#define HDP_FB_LOCATION 0x0134 +#define CP_CSQ_CNTL 0x0740 +#define CP_CSQ_MODE 0x0744 +#define CP_CSQ_ADDR 0x07F0 +#define CP_CSQ_DATA 0x07F4 +#define CP_CSQ_STAT 0x07F8 +#define CP_CSQ2_STAT 0x07FC +#define RBBM_STATUS 0x0E40 +#define DST_PIPE_CONFIG 0x170C +#define WAIT_UNTIL 0x1720 +#define WAIT_2D_IDLE (1 << 14) +#define WAIT_3D_IDLE (1 << 15) +#define WAIT_2D_IDLECLEAN (1 << 16) +#define WAIT_3D_IDLECLEAN (1 << 17) +#define ISYNC_CNTL 0x1724 +#define ISYNC_ANY2D_IDLE3D (1 << 0) +#define ISYNC_ANY3D_IDLE2D (1 << 1) +#define ISYNC_TRIG2D_IDLE3D (1 << 2) +#define ISYNC_TRIG3D_IDLE2D (1 << 3) +#define ISYNC_WAIT_IDLEGUI (1 << 4) +#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5) +#define VAP_INDEX_OFFSET 0x208C +#define VAP_PVS_STATE_FLUSH_REG 0x2284 +#define GB_ENABLE 0x4008 +#define GB_MSPOS0 0x4010 +#define MS_X0_SHIFT 0 +#define MS_Y0_SHIFT 4 +#define MS_X1_SHIFT 8 +#define MS_Y1_SHIFT 12 +#define MS_X2_SHIFT 16 +#define MS_Y2_SHIFT 20 +#define MSBD0_Y_SHIFT 24 +#define MSBD0_X_SHIFT 28 +#define GB_MSPOS1 0x4014 +#define MS_X3_SHIFT 0 +#define MS_Y3_SHIFT 4 +#define MS_X4_SHIFT 8 +#define MS_Y4_SHIFT 12 +#define MS_X5_SHIFT 16 +#define MS_Y5_SHIFT 20 +#define MSBD1_SHIFT 24 +#define GB_TILE_CONFIG 0x4018 +#define ENABLE_TILING (1 << 0) +#define PIPE_COUNT_MASK 0x0000000E +#define PIPE_COUNT_SHIFT 1 +#define TILE_SIZE_8 (0 << 4) +#define TILE_SIZE_16 (1 << 4) +#define TILE_SIZE_32 (2 << 4) +#define SUBPIXEL_1_12 (0 << 16) +#define SUBPIXEL_1_16 (1 << 16) +#define GB_SELECT 0x401C +#define GB_AA_CONFIG 0x4020 +#define GB_PIPE_SELECT 0x402C +#define GA_ENHANCE 0x4274 +#define GA_DEADLOCK_CNTL (1 << 0) +#define GA_FASTSYNC_CNTL (1 << 1) +#define GA_POLY_MODE 0x4288 +#define FRONT_PTYPE_POINT (0 << 4) +#define FRONT_PTYPE_LINE (1 << 4) +#define FRONT_PTYPE_TRIANGE (2 << 4) +#define BACK_PTYPE_POINT (0 << 7) +#define BACK_PTYPE_LINE (1 << 7) +#define BACK_PTYPE_TRIANGE (2 << 7) +#define GA_ROUND_MODE 0x428C +#define GEOMETRY_ROUND_TRUNC (0 << 0) +#define GEOMETRY_ROUND_NEAREST (1 << 0) +#define COLOR_ROUND_TRUNC (0 << 2) +#define COLOR_ROUND_NEAREST (1 << 2) +#define SU_REG_DEST 0x42C8 +#define RB3D_DSTCACHE_CTLSTAT 0x4E4C +#define RB3D_DC_FLUSH (2 << 0) +#define RB3D_DC_FREE (2 << 2) +#define RB3D_DC_FINISH (1 << 4) +#define ZB_ZCACHE_CTLSTAT 0x4F18 +#define ZC_FLUSH (1 << 0) +#define ZC_FREE (1 << 1) +#define DC_LB_MEMORY_SPLIT 0x6520 +#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 +#define DC_LB_MEMORY_SPLIT_SHIFT 0 +#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 +#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) +#define DC_LB_DISP1_END_ADR_SHIFT 4 +#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 +#define D1MODE_PRIORITY_A_CNT 0x6548 +#define MODE_PRIORITY_MARK_MASK 0x00007FFF +#define MODE_PRIORITY_OFF (1 << 16) +#define MODE_PRIORITY_ALWAYS_ON (1 << 20) +#define MODE_PRIORITY_FORCE_MASK (1 << 24) +#define D1MODE_PRIORITY_B_CNT 0x654C +#define LB_MAX_REQ_OUTSTANDING 0x6D58 +#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F +#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 +#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 +#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 
+#define D2MODE_PRIORITY_A_CNT 0x6D48 +#define D2MODE_PRIORITY_B_CNT 0x6D4C + +/* ix[MC] registers */ +#define MC_FB_LOCATION 0x01 +#define MC_FB_START_MASK 0x0000FFFF +#define MC_FB_START_SHIFT 0 +#define MC_FB_TOP_MASK 0xFFFF0000 +#define MC_FB_TOP_SHIFT 16 +#define MC_AGP_LOCATION 0x02 +#define MC_AGP_START_MASK 0x0000FFFF +#define MC_AGP_START_SHIFT 0 +#define MC_AGP_TOP_MASK 0xFFFF0000 +#define MC_AGP_TOP_SHIFT 16 +#define MC_AGP_BASE 0x03 +#define MC_AGP_BASE_2 0x04 +#define MC_CNTL 0x5 +#define MEM_NUM_CHANNELS_MASK 0x00000003 +#define MC_STATUS 0x08 +#define MC_STATUS_IDLE (1 << 4) +#define MC_MISC_LAT_TIMER 0x09 +#define MC_CPR_INIT_LAT_MASK 0x0000000F +#define MC_VF_INIT_LAT_MASK 0x000000F0 +#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 +#define MC_DISP0R_INIT_LAT_SHIFT 8 +#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 +#define MC_DISP1R_INIT_LAT_SHIFT 12 +#define MC_FIXED_INIT_LAT_MASK 0x000F0000 +#define MC_E2R_INIT_LAT_MASK 0x00F00000 +#define SAME_PAGE_PRIO_MASK 0x0F000000 +#define MC_GLOBW_INIT_LAT_MASK 0xF0000000 + + +#endif + diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da50cc51ede..21d8ffd5730 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); WREG32(R700_MC_VM_FB_LOCATION, tmp); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca..c2b0d710d10 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -43,7 +43,6 @@ #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); -static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static inline uint32_t ttm_bo_type_flags(unsigned type) @@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) TTM_ASSERT_LOCKED(&bo->mutex); bo->ttm = NULL; + if (bdev->need_dma32) + page_flags |= TTM_PAGE_FLAG_DMA32; + switch (bo->type) { case ttm_bo_type_device: if (zero_alloc) @@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } + if (bdev->driver->move_notify) + bdev->driver->move_notify(bo, mem); + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); @@ -655,31 +660,52 @@ retry_pre_get: return 0; } +static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, + uint32_t cur_placement, + uint32_t proposed_placement) +{ + uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; + uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; + + /** + * Keep current caching if possible. 
+ */ + + if ((cur_placement & caching) != 0) + result |= (cur_placement & caching); + else if ((man->default_caching & caching) != 0) + result |= man->default_caching; + else if ((TTM_PL_FLAG_CACHED & caching) != 0) + result |= TTM_PL_FLAG_CACHED; + else if ((TTM_PL_FLAG_WC & caching) != 0) + result |= TTM_PL_FLAG_WC; + else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) + result |= TTM_PL_FLAG_UNCACHED; + + return result; +} + + static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, bool disallow_fixed, uint32_t mem_type, - uint32_t mask, uint32_t *res_mask) + uint32_t proposed_placement, + uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) return false; - if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) + if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) return false; - if ((mask & man->available_caching) == 0) + if ((proposed_placement & man->available_caching) == 0) return false; - if (mask & man->default_caching) - cur_flags |= man->default_caching; - else if (mask & TTM_PL_FLAG_CACHED) - cur_flags |= TTM_PL_FLAG_CACHED; - else if (mask & TTM_PL_FLAG_WC) - cur_flags |= TTM_PL_FLAG_WC; - else - cur_flags |= TTM_PL_FLAG_UNCACHED; - *res_mask = cur_flags; + cur_flags |= (proposed_placement & man->available_caching); + + *masked_placement = cur_flags; return true; } @@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (!type_ok) continue; + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, + cur_flags); + if (mem_type == TTM_PL_SYSTEM) break; @@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, proposed_placement, &cur_flags)) continue; + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, + cur_flags); + ret = ttm_bo_mem_force_space(bdev, mem, mem_type, interruptible, no_wait); @@ -1150,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct ttm_mem_type_manager *man; int ret = -EINVAL; if (mem_type >= TTM_NUM_MEM_TYPES) { printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); return ret; } + man = &bdev->man[mem_type]; if (!man->has_type) { printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " @@ -1305,7 +1338,8 @@ EXPORT_SYMBOL(ttm_bo_device_release); int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_mem_global *mem_glob, - struct ttm_bo_driver *driver, uint64_t file_page_offset) + struct ttm_bo_driver *driver, uint64_t file_page_offset, + bool need_dma32) { int ret = -EINVAL; @@ -1342,6 +1376,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bdev->ddestroy); INIT_LIST_HEAD(&bdev->swap_lru); bdev->dev_mapping = NULL; + bdev->need_dma32 = need_dma32; ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); if (unlikely(ret != 0)) { @@ -1419,6 +1454,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); } +EXPORT_SYMBOL(ttm_bo_unmap_virtual); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { @@ -1540,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); spin_lock(&bo->lock); + } else { + spin_unlock(&bo->lock); + driver->sync_obj_unref(&sync_obj); + spin_lock(&bo->lock); } } return 0; diff --git 
a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 517c8455963..ad4ada07c6c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -34,7 +34,6 @@ #include <linux/highmem.h> #include <linux/wait.h> #include <linux/vmalloc.h> -#include <linux/version.h> #include <linux/module.h> void ttm_bo_free_old_node(struct ttm_buffer_object *bo) @@ -137,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) } static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, - unsigned long page) + unsigned long page, + pgprot_t prot) { struct page *d = ttm_tt_get_page(ttm, page); void *dst; @@ -146,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, return -ENOMEM; src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); - dst = kmap(d); + +#ifdef CONFIG_X86 + dst = kmap_atomic_prot(d, KM_USER0, prot); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + dst = vmap(&d, 1, 0, prot); + else + dst = kmap(d); +#endif if (!dst) return -ENOMEM; memcpy_fromio(dst, src, PAGE_SIZE); - kunmap(d); + +#ifdef CONFIG_X86 + kunmap_atomic(dst, KM_USER0); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + vunmap(dst); + else + kunmap(d); +#endif + return 0; } static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, - unsigned long page) + unsigned long page, + pgprot_t prot) { struct page *s = ttm_tt_get_page(ttm, page); void *src; @@ -165,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, return -ENOMEM; dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); - src = kmap(s); +#ifdef CONFIG_X86 + src = kmap_atomic_prot(s, KM_USER0, prot); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + src = vmap(&s, 1, 0, prot); + else + src = kmap(s); +#endif if (!src) return -ENOMEM; memcpy_toio(dst, src, PAGE_SIZE); - kunmap(s); + +#ifdef CONFIG_X86 + kunmap_atomic(src, KM_USER0); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + vunmap(src); + else + kunmap(s); +#endif + return 0; } @@ -215,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, for (i = 0; i < new_mem->num_pages; ++i) { page = i * dir + add; - if (old_iomap == NULL) - ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); - else if (new_iomap == NULL) - ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); - else + if (old_iomap == NULL) { + pgprot_t prot = ttm_io_prot(old_mem->placement, + PAGE_KERNEL); + ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, + prot); + } else if (new_iomap == NULL) { + pgprot_t prot = ttm_io_prot(new_mem->placement, + PAGE_KERNEL); + ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, + prot); + } else ret = ttm_copy_io_page(new_iomap, old_iomap, page); if (ret) goto out1; @@ -510,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (evict) { ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bo->lock); - driver->sync_obj_unref(&bo->sync_obj); - + if (tmp_obj) + driver->sync_obj_unref(&tmp_obj); if (ret) return ret; @@ -533,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); spin_unlock(&bo->lock); + if (tmp_obj) + driver->sync_obj_unref(&tmp_obj); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 27b146c54fb..33de7637c0c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -32,7 +32,6 @@ #include <ttm/ttm_bo_driver.h> #include 
<ttm/ttm_placement.h> #include <linux/mm.h> -#include <linux/version.h> #include <linux/rbtree.h> #include <linux/module.h> #include <linux/uaccess.h> @@ -102,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; } + if (bdev->driver->fault_reserve_notify) + bdev->driver->fault_reserve_notify(bo); + /* * Wait for buffer data in transit, due to a pipelined * move. @@ -328,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, goto out_unref; kmap_offset = dev_offset - bo->vm_node->start; - if (unlikely(kmap_offset) >= bo->num_pages) { + if (unlikely(kmap_offset >= bo->num_pages)) { ret = -EFBIG; goto out_unref; } @@ -402,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, bool dummy; kmap_offset = (*f_pos >> PAGE_SHIFT); - if (unlikely(kmap_offset) >= bo->num_pages) + if (unlikely(kmap_offset >= bo->num_pages)) return -EFBIG; page_offset = *f_pos & ~PAGE_MASK; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 0331fa74cd3..b8b6c4a5f98 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -28,7 +28,6 @@ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -#include <linux/version.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/highmem.h> @@ -87,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) unsigned long i; for (i = 0; i < num_pages; ++i) { - if (pages[i]) { - unsigned long start = (unsigned long)page_address(pages[i]); - flush_dcache_range(start, start + PAGE_SIZE); - } + struct page *page = pages[i]; + void *page_virtual; + + if (unlikely(page == NULL)) + continue; + + page_virtual = kmap_atomic(page, KM_USER0); + flush_dcache_range((unsigned long) page_virtual, + (unsigned long) page_virtual + PAGE_SIZE); + kunmap_atomic(page_virtual, KM_USER0); } #else if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) @@ -132,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) static struct page *ttm_tt_alloc_page(unsigned page_flags) { + gfp_t gfp_flags = GFP_USER; + if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - return alloc_page(GFP_HIGHUSER | __GFP_ZERO); + gfp_flags |= __GFP_ZERO; + + if (page_flags & TTM_PAGE_FLAG_DMA32) + gfp_flags |= __GFP_DMA32; + else + gfp_flags |= __GFP_HIGHMEM; - return alloc_page(GFP_HIGHUSER); + return alloc_page(gfp_flags); } static void ttm_tt_free_user_pages(struct ttm_tt *ttm) diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index c248c1d3726..5935b8842e8 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -183,7 +183,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc) } status = VIA_READ(VIA_REG_INTERRUPT); - VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); + VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); @@ -194,6 +194,10 @@ int via_enable_vblank(struct drm_device *dev, int crtc) void via_disable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; + u32 status; + + status = VIA_READ(VIA_REG_INTERRUPT); + VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f2c21d5d24e..5eb10c2ce66 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1075,14 
+1075,16 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event); */ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt) { - struct hid_report_enum *report_enum = hid->report_enum + type; - struct hid_driver *hdrv = hid->driver; + struct hid_report_enum *report_enum; + struct hid_driver *hdrv; struct hid_report *report; unsigned int i; int ret; if (!hid || !hid->driver) return -ENODEV; + report_enum = hid->report_enum + type; + hdrv = hid->driver; if (!size) { dbg_hid("empty report\n"); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 76c4bbe9dcc..3c1fcb7640a 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -22,7 +22,6 @@ #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <asm/byteorder.h> diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 9e9421525fb..215b2addddb 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -527,8 +527,10 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, goto goodreturn; case HIDIOCGCOLLECTIONINDEX: + i = field->usage[uref->usage_index].collection_index; + unlock_kernel(); kfree(uref_multi); - return field->usage[uref->usage_index].collection_index; + return i; case HIDIOCGUSAGES: for (i = 0; i < uref_multi->num_values; i++) uref_multi->values[i] = diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c index ad2b3431b72..7d3f15d32fd 100644 --- a/drivers/hwmon/abituguru3.c +++ b/drivers/hwmon/abituguru3.c @@ -357,7 +357,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { { "AUX5 Fan", 39, 2, 60, 1, 0 }, { NULL, 0, 0, 0, 0, 0 } } }, - { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, { + { 0x0014, "AB9", /* + AB9 Pro */ { { "CPU Core", 0, 0, 10, 1, 0 }, { "DDR", 1, 0, 10, 1, 0 }, { "DDR VTT", 2, 0, 10, 1, 0 }, @@ -455,7 +455,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { { "AUX3 FAN", 37, 2, 60, 1, 0 }, { NULL, 0, 0, 0, 0, 0 } } }, - { 0x0018, NULL /* Unknown, need DMI string */, { + { 0x0018, "AB9 QuadGT", { { "CPU Core", 0, 0, 10, 1, 0 }, { "DDR2", 1, 0, 20, 1, 0 }, { "DDR2 VTT", 2, 0, 10, 1, 0 }, @@ -564,7 +564,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { { "AUX3 Fan", 36, 2, 60, 1, 0 }, { NULL, 0, 0, 0, 0, 0 } } }, - { 0x001C, NULL /* Unknown, need DMI string */, { + { 0x001C, "IX38 QuadGT", { { "CPU Core", 0, 0, 10, 1, 0 }, { "DDR2", 1, 0, 20, 1, 0 }, { "DDR2 VTT", 2, 0, 10, 1, 0 }, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index bff0103610c..fe4fa29c921 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) sensor->data = data; sensor->id = flags->integer.value; sensor->limit1 = limit1->integer.value; - sensor->limit2 = limit2->integer.value; + if (data->old_interface) + sensor->limit2 = limit2->integer.value; + else + /* The upper limit is expressed as delta from lower limit */ + sensor->limit2 = sensor->limit1 + limit2->integer.value; snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, "%s%d_input", base_name, start + *num); diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 86142a85823..58f66be61b1 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -418,6 +418,7 @@ static 
ssize_t set_div(struct device *dev, struct device_attribute *devattr, data->count = 3; break; default: + mutex_unlock(&data->update_lock); dev_err(&client->dev, "illegal value for fan divider (%d)\n", div); return -EINVAL; diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 56cd6004da3..6290a259456 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -257,7 +257,7 @@ static inline int sht15_update_single_val(struct sht15_data *data, (data->flag == SHT15_READING_NOTHING), msecs_to_jiffies(timeout_msecs)); if (ret == 0) {/* timeout occurred */ - disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));; + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); sht15_connection_reset(data); return -ETIME; } diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index a92dbb97ee9..ba75bfcf14c 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c @@ -86,6 +86,7 @@ superio_exit(void) #define SUPERIO_REG_ACT 0x30 #define SUPERIO_REG_BASE 0x60 #define SUPERIO_REG_DEVID 0x20 +#define SUPERIO_REG_DEVREV 0x21 /* Logical device registers */ @@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr, * The LPC47M292 (device id 0x6B) is somewhat compatible, but it * supports a 3rd fan, and the pin configuration registers are * unfortunately different. + * The LPC47M233 has the same device id (0x6B) but is not compatible. + * We check the high bit of the device revision register to + * differentiate them. */ switch (val) { case 0x51: @@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr, sio_data->type = smsc47m1; break; case 0x6B: + if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) { + pr_debug(DRVNAME ": " + "Found SMSC LPC47M233, unsupported\n"); + superio_exit(); + return -ENODEV; + } + pr_info(DRVNAME ": Found SMSC LPC47M292\n"); sio_data->type = smsc47m2; break; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index aa87b6a3bbe..8206442fbab 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -328,6 +328,7 @@ config I2C_DAVINCI config I2C_DESIGNWARE tristate "Synopsys DesignWare" + depends on HAVE_CLK help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 3fae3a91ce5..c89687a1083 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -187,6 +187,11 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev) davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh); davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl); + /* Respond at reserved "SMBus Host" slave address" (and zero); + * we seem to have no option to not respond... 
+ */ + davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, 0x08); + dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk); dev_dbg(dev->dev, "PSC = %d\n", davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG)); @@ -387,7 +392,7 @@ static void terminate_write(struct davinci_i2c_dev *dev) davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); if (!dev->terminate) - dev_err(dev->dev, "TDR IRQ while no data to send\n"); + dev_dbg(dev->dev, "TDR IRQ while no data to send\n"); } /* @@ -473,9 +478,14 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) break; case DAVINCI_I2C_IVR_AAS: - dev_warn(dev->dev, "Address as slave interrupt\n"); - }/* switch */ - }/* while */ + dev_dbg(dev->dev, "Address as slave interrupt\n"); + break; + + default: + dev_warn(dev->dev, "Unrecognized irq stat %d\n", stat); + break; + } + } return count ? IRQ_HANDLED : IRQ_NONE; } @@ -505,7 +515,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) return -ENODEV; } - ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, + ioarea = request_mem_region(mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "I2C region already claimed\n"); @@ -523,7 +533,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) dev->irq = irq->start; platform_set_drvdata(pdev, dev); - dev->clk = clk_get(&pdev->dev, "I2CCLK"); + dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) { r = -ENODEV; goto err_free_mem; @@ -568,7 +578,7 @@ err_free_mem: put_device(&pdev->dev); kfree(dev); err_release_region: - release_mem_region(mem->start, (mem->end - mem->start) + 1); + release_mem_region(mem->start, resource_size(mem)); return r; } @@ -591,7 +601,7 @@ static int davinci_i2c_remove(struct platform_device *pdev) kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, (mem->end - mem->start) + 1); + release_mem_region(mem->start, resource_size(mem)); return 0; } diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index e4476743f20..b1bc6e277d2 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -85,10 +85,11 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header); - printk(KERN_DEBUG " cntl = 0x%02x, mdcntl = 0x%02x\n" - KERN_DEBUG " sts = 0x%02x, extsts = 0x%02x\n" - KERN_DEBUG " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" - KERN_DEBUG " xtcntlss = 0x%02x, directcntl = 0x%02x\n", + printk(KERN_DEBUG + " cntl = 0x%02x, mdcntl = 0x%02x\n" + " sts = 0x%02x, extsts = 0x%02x\n" + " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" + " xtcntlss = 0x%02x, directcntl = 0x%02x\n", in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), in_8(&iic->xtcntlss), in_8(&iic->directcntl)); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index ad8d2010c92..827da085813 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -672,9 +672,17 @@ omap_i2c_isr(int this_irq, void *dev_id) break; } - omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); - err = 0; +complete: + /* + * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be + * acked after the data operation is complete. 
+ * Ref: TRM SWPU114Q Figure 18-31 + */ + omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat & + ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | + OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); + if (stat & OMAP_I2C_STAT_NACK) { err |= OMAP_I2C_STAT_NACK; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, @@ -685,16 +693,22 @@ omap_i2c_isr(int this_irq, void *dev_id) err |= OMAP_I2C_STAT_AL; } if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | - OMAP_I2C_STAT_AL)) + OMAP_I2C_STAT_AL)) { + omap_i2c_ack_stat(dev, stat & + (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | + OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); omap_i2c_complete_cmd(dev, err); + return IRQ_HANDLED; + } if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { u8 num_bytes = 1; if (dev->fifo_size) { if (stat & OMAP_I2C_STAT_RRDY) num_bytes = dev->fifo_size; - else - num_bytes = omap_i2c_read_reg(dev, - OMAP_I2C_BUFSTAT_REG); + else /* read RXSTAT on RDR interrupt */ + num_bytes = (omap_i2c_read_reg(dev, + OMAP_I2C_BUFSTAT_REG) + >> 8) & 0x3F; } while (num_bytes) { num_bytes--; @@ -731,9 +745,10 @@ omap_i2c_isr(int this_irq, void *dev_id) if (dev->fifo_size) { if (stat & OMAP_I2C_STAT_XRDY) num_bytes = dev->fifo_size; - else + else /* read TXSTAT on XDR interrupt */ num_bytes = omap_i2c_read_reg(dev, - OMAP_I2C_BUFSTAT_REG); + OMAP_I2C_BUFSTAT_REG) + & 0x3F; } while (num_bytes) { num_bytes--; @@ -760,6 +775,27 @@ omap_i2c_isr(int this_irq, void *dev_id) "data to send\n"); break; } + + /* + * OMAP3430 Errata 1.153: When an XRDY/XDR + * is hit, wait for XUDF before writing data + * to DATA_REG. Otherwise some data bytes can + * be lost while transferring them from the + * memory to the I2C interface. + */ + + if (dev->rev <= OMAP_I2C_REV_ON_3430) { + while (!(stat & OMAP_I2C_STAT_XUDF)) { + if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { + omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); + err |= OMAP_I2C_STAT_XUDF; + goto complete; + } + cpu_relax(); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); + } + } + omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); } omap_i2c_ack_stat(dev, @@ -806,7 +842,7 @@ omap_i2c_probe(struct platform_device *pdev) return -ENODEV; } - ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, + ioarea = request_mem_region(mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "I2C region already claimed\n"); @@ -879,7 +915,7 @@ omap_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; - strncpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); + strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->dev.parent = &pdev->dev; @@ -905,7 +941,7 @@ err_free_mem: platform_set_drvdata(pdev, NULL); kfree(dev); err_release_region: - release_mem_region(mem->start, (mem->end - mem->start) + 1); + release_mem_region(mem->start, resource_size(mem)); return r; } @@ -925,7 +961,7 @@ omap_i2c_remove(struct platform_device *pdev) iounmap(dev->base); kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, (mem->end - mem->start) + 1); + release_mem_region(mem->start, resource_size(mem)); return 0; } diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 8f42a4536cd..20bb0ceb027 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -763,11 +763,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c) dev_info(i2c->dev, "bus frequency set to %d KHz\n", 
freq); dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon); - /* check for s3c2440 i2c controller */ - - if (s3c24xx_i2c_is2440(i2c)) - writel(0x0, i2c->regs + S3C2440_IICLC); - return 0; } diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 1c01083b01b..820487d0d5c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -563,7 +563,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) goto err_irq; } - size = (res->end - res->start) + 1; + size = resource_size(res); pd->reg = ioremap(res->start, size); if (pd->reg == NULL) { @@ -637,7 +637,7 @@ static void __exit sh_mobile_i2c_adap_exit(void) platform_driver_unregister(&sh_mobile_i2c_driver); } -module_init(sh_mobile_i2c_adap_init); +subsys_initcall(sh_mobile_i2c_adap_init); module_exit(sh_mobile_i2c_adap_exit); MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index 042fda295f3..6407f47bda8 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -92,7 +92,7 @@ static int simtec_i2c_probe(struct platform_device *dev) goto err; } - size = (res->end-res->start)+1; + size = resource_size(res); pd->ioarea = request_mem_region(res->start, size, dev->name); if (pd->ioarea == NULL) { diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 182e711318b..d2728a28a8d 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -117,7 +117,8 @@ enum stu300_error { STU300_ERROR_NONE = 0, STU300_ERROR_ACKNOWLEDGE_FAILURE, STU300_ERROR_BUS_ERROR, - STU300_ERROR_ARBITRATION_LOST + STU300_ERROR_ARBITRATION_LOST, + STU300_ERROR_UNKNOWN }; /* timeout waiting for the controller to respond */ @@ -127,7 +128,7 @@ enum stu300_error { * The number of address send attempts tried before giving up. * If the first one fails it seems like 5 to 8 attempts are required. */ -#define NUM_ADDR_RESEND_ATTEMPTS 10 +#define NUM_ADDR_RESEND_ATTEMPTS 12 /* I2C clock speed, in Hz 0-400kHz*/ static unsigned int scl_frequency = 100000; @@ -149,6 +150,7 @@ module_param(scl_frequency, uint, 0644); * @msg_index: index of current message * @msg_len: length of current message */ + struct stu300_dev { struct platform_device *pdev; struct i2c_adapter adapter; @@ -188,6 +190,27 @@ static inline u32 stu300_r8(void __iomem *address) return readl(address) & 0x000000FFU; } +static void stu300_irq_enable(struct stu300_dev *dev) +{ + u32 val; + val = stu300_r8(dev->virtbase + I2C_CR); + val |= I2C_CR_INTERRUPT_ENABLE; + /* Twice paranoia (possible HW glitch) */ + stu300_wr8(val, dev->virtbase + I2C_CR); + stu300_wr8(val, dev->virtbase + I2C_CR); +} + +static void stu300_irq_disable(struct stu300_dev *dev) +{ + u32 val; + val = stu300_r8(dev->virtbase + I2C_CR); + val &= ~I2C_CR_INTERRUPT_ENABLE; + /* Twice paranoia (possible HW glitch) */ + stu300_wr8(val, dev->virtbase + I2C_CR); + stu300_wr8(val, dev->virtbase + I2C_CR); +} + + /* * Tells whether a certain event or events occurred in * response to a command. The events represent states in @@ -196,9 +219,10 @@ static inline u32 stu300_r8(void __iomem *address) * documentation and can only be treated as abstract state * machine states. * - * @ret 0 = event has not occurred, any other value means - * the event occurred. + * @ret 0 = event has not occurred or unknown error, any + * other value means the correct event occurred or an error. 
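In other words, the reworked stu300_event_occurred() below treats a recognizable bus error the same as the awaited event for wake-up purposes, and reserves a zero return for silence or an unrecognized state. A compressed model of that priority order (the status bit masks are stand-ins, not the real SR1/SR2 encoding):

enum err { ERR_NONE, ERR_NAK, ERR_BUS, ERR_ARBITRATION, ERR_UNKNOWN };

/* Returns nonzero if the expected event or a recognizable error occurred. */
int event_occurred(unsigned sr1, unsigned sr2,
                   unsigned expected_sr1_bit, enum err *cmd_err)
{
        if (!(sr1 & 0x01))              /* no event flag at all */
                return 0;
        if (sr2 & 0x10) {               /* acknowledge failure */
                *cmd_err = ERR_NAK;
                return 1;
        }
        if (sr2 & 0x08) {               /* bus error */
                *cmd_err = ERR_BUS;
                return 1;
        }
        if (sr2 & 0x04) {               /* arbitration lost */
                *cmd_err = ERR_ARBITRATION;
                return 1;
        }
        if (sr1 & expected_sr1_bit)     /* the event we were waiting for */
                return 1;
        *cmd_err = ERR_UNKNOWN;         /* response does not match the request */
        return 0;
}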
*/ + static int stu300_event_occurred(struct stu300_dev *dev, enum stu300_event mr_event) { u32 status1; @@ -206,11 +230,28 @@ static int stu300_event_occurred(struct stu300_dev *dev, /* What event happened? */ status1 = stu300_r8(dev->virtbase + I2C_SR1); + if (!(status1 & I2C_SR1_EVF_IND)) /* No event at all */ return 0; + status2 = stu300_r8(dev->virtbase + I2C_SR2); + /* Block any multiple interrupts */ + stu300_irq_disable(dev); + + /* Check for errors first */ + if (status2 & I2C_SR2_AF_IND) { + dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; + return 1; + } else if (status2 & I2C_SR2_BERR_IND) { + dev->cmd_err = STU300_ERROR_BUS_ERROR; + return 1; + } else if (status2 & I2C_SR2_ARLO_IND) { + dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; + return 1; + } + switch (mr_event) { case STU300_EVENT_1: if (status1 & I2C_SR1_ADSL_IND) @@ -221,10 +262,6 @@ static int stu300_event_occurred(struct stu300_dev *dev, case STU300_EVENT_7: case STU300_EVENT_8: if (status1 & I2C_SR1_BTF_IND) { - if (status2 & I2C_SR2_AF_IND) - dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; - else if (status2 & I2C_SR2_BERR_IND) - dev->cmd_err = STU300_ERROR_BUS_ERROR; return 1; } break; @@ -240,8 +277,6 @@ static int stu300_event_occurred(struct stu300_dev *dev, case STU300_EVENT_6: if (status2 & I2C_SR2_ENDAD_IND) { /* First check for any errors */ - if (status2 & I2C_SR2_AF_IND) - dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; return 1; } break; @@ -252,8 +287,15 @@ static int stu300_event_occurred(struct stu300_dev *dev, default: break; } - if (status2 & I2C_SR2_ARLO_IND) - dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; + /* If we get here, we're on thin ice. + * Here we are in a status where we have + * gotten a response that does not match + * what we requested. + */ + dev->cmd_err = STU300_ERROR_UNKNOWN; + dev_err(&dev->pdev->dev, + "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n", + mr_event, status1, status2); return 0; } @@ -262,21 +304,20 @@ static irqreturn_t stu300_irh(int irq, void *data) struct stu300_dev *dev = data; int res; + /* Just make sure that the block is clocked */ + clk_enable(dev->clk); + /* See if this was what we were waiting for */ spin_lock(&dev->cmd_issue_lock); - if (dev->cmd_event != STU300_EVENT_NONE) { - res = stu300_event_occurred(dev, dev->cmd_event); - if (res || dev->cmd_err != STU300_ERROR_NONE) { - u32 val; - - complete(&dev->cmd_complete); - /* Block any multiple interrupts */ - val = stu300_r8(dev->virtbase + I2C_CR); - val &= ~I2C_CR_INTERRUPT_ENABLE; - stu300_wr8(val, dev->virtbase + I2C_CR); - } - } + + res = stu300_event_occurred(dev, dev->cmd_event); + if (res || dev->cmd_err != STU300_ERROR_NONE) + complete(&dev->cmd_complete); + spin_unlock(&dev->cmd_issue_lock); + + clk_disable(dev->clk); + return IRQ_HANDLED; } @@ -308,7 +349,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev, stu300_wr8(cr_value, dev->virtbase + I2C_CR); ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, STU300_TIMEOUT); - if (ret < 0) { dev_err(&dev->pdev->dev, "wait_for_completion_interruptible_timeout() " @@ -342,7 +382,6 @@ static int stu300_await_event(struct stu300_dev *dev, enum stu300_event mr_event) { int ret; - u32 val; if (unlikely(irqs_disabled())) { /* TODO: implement polling for this case if need be. */ @@ -354,36 +393,18 @@ static int stu300_await_event(struct stu300_dev *dev, /* Is it already here? 
*/ spin_lock_irq(&dev->cmd_issue_lock); dev->cmd_err = STU300_ERROR_NONE; - if (stu300_event_occurred(dev, mr_event)) { - spin_unlock_irq(&dev->cmd_issue_lock); - goto exit_await_check_err; - } - init_completion(&dev->cmd_complete); - dev->cmd_err = STU300_ERROR_NONE; dev->cmd_event = mr_event; - /* Turn on the I2C interrupt for current operation */ - val = stu300_r8(dev->virtbase + I2C_CR); - val |= I2C_CR_INTERRUPT_ENABLE; - stu300_wr8(val, dev->virtbase + I2C_CR); - - /* Twice paranoia (possible HW glitch) */ - stu300_wr8(val, dev->virtbase + I2C_CR); + init_completion(&dev->cmd_complete); - /* Check again: is it already here? */ - if (unlikely(stu300_event_occurred(dev, mr_event))) { - /* Disable IRQ again. */ - val &= ~I2C_CR_INTERRUPT_ENABLE; - stu300_wr8(val, dev->virtbase + I2C_CR); - spin_unlock_irq(&dev->cmd_issue_lock); - goto exit_await_check_err; - } + /* Turn on the I2C interrupt for current operation */ + stu300_irq_enable(dev); /* Unlock the command block and wait for the event to occur */ spin_unlock_irq(&dev->cmd_issue_lock); + ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, STU300_TIMEOUT); - if (ret < 0) { dev_err(&dev->pdev->dev, "wait_for_completion_interruptible_timeout()" @@ -401,7 +422,6 @@ static int stu300_await_event(struct stu300_dev *dev, return -ETIMEDOUT; } - exit_await_check_err: if (dev->cmd_err != STU300_ERROR_NONE) { if (mr_event != STU300_EVENT_6) { dev_err(&dev->pdev->dev, "controller " @@ -457,18 +477,19 @@ struct stu300_clkset { }; static const struct stu300_clkset stu300_clktable[] = { - { 0, 0xFFU }, - { 2500000, I2C_OAR2_FR_25_10MHZ }, - { 10000000, I2C_OAR2_FR_10_1667MHZ }, - { 16670000, I2C_OAR2_FR_1667_2667MHZ }, - { 26670000, I2C_OAR2_FR_2667_40MHZ }, - { 40000000, I2C_OAR2_FR_40_5333MHZ }, - { 53330000, I2C_OAR2_FR_5333_66MHZ }, - { 66000000, I2C_OAR2_FR_66_80MHZ }, - { 80000000, I2C_OAR2_FR_80_100MHZ }, + { 0, 0xFFU }, + { 2500000, I2C_OAR2_FR_25_10MHZ }, + { 10000000, I2C_OAR2_FR_10_1667MHZ }, + { 16670000, I2C_OAR2_FR_1667_2667MHZ }, + { 26670000, I2C_OAR2_FR_2667_40MHZ }, + { 40000000, I2C_OAR2_FR_40_5333MHZ }, + { 53330000, I2C_OAR2_FR_5333_66MHZ }, + { 66000000, I2C_OAR2_FR_66_80MHZ }, + { 80000000, I2C_OAR2_FR_80_100MHZ }, { 100000000, 0xFFU }, }; + static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) { @@ -494,10 +515,10 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) if (dev->speed > 100000) /* Fast Mode I2C */ - val = ((clkrate/dev->speed)-9)/3; + val = ((clkrate/dev->speed) - 9)/3 + 1; else /* Standard Mode I2C */ - val = ((clkrate/dev->speed)-7)/2; + val = ((clkrate/dev->speed) - 7)/2 + 1; /* According to spec the divider must be > 2 */ if (val < 0x002) { @@ -557,6 +578,7 @@ static int stu300_init_hw(struct stu300_dev *dev) */ clkrate = clk_get_rate(dev->clk); ret = stu300_set_clk(dev, clkrate); + if (ret) return ret; /* @@ -641,7 +663,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap, int attempts = 0; struct stu300_dev *dev = i2c_get_adapdata(adap); - clk_enable(dev->clk); /* Remove this if (0) to trace each and every message. 
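The divider tweak in stu300_set_clk() a few hunks up adds one to the truncated quotient, presumably so the programmed SCL rate errs on the slow side of the requested speed instead of overshooting it. A standalone arithmetic check (the 52 MHz input clock is only an example figure, not a claim about any particular board):

#include <stdio.h>

static unsigned divider(unsigned long clkrate, unsigned speed)
{
        if (speed > 100000)                        /* Fast Mode I2C */
                return ((clkrate / speed) - 9) / 3 + 1;
        else                                       /* Standard Mode I2C */
                return ((clkrate / speed) - 7) / 2 + 1;
}

int main(void)
{
        unsigned long clk = 52000000;
        printf("100 kHz -> divider %u\n", divider(clk, 100000));  /* 257 */
        printf("400 kHz -> divider %u\n", divider(clk, 400000));  /* 41 */
        return 0;
}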
*/ @@ -715,14 +736,15 @@ static int stu300_xfer_msg(struct i2c_adapter *adap, if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) { dev_dbg(&dev->pdev->dev, "managed to get address " - "through after %d attempts\n", attempts); + "through after %d attempts\n", attempts); } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) { dev_dbg(&dev->pdev->dev, "I give up, tried %d times " - "to resend address.\n", - NUM_ADDR_RESEND_ATTEMPTS); + "to resend address.\n", + NUM_ADDR_RESEND_ATTEMPTS); goto exit_disable; } + if (msg->flags & I2C_M_RD) { /* READ: we read the actual bytes one at a time */ for (i = 0; i < msg->len; i++) { @@ -804,8 +826,10 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, { int ret = -1; int i; + struct stu300_dev *dev = i2c_get_adapdata(adap); dev->msg_len = num; + for (i = 0; i < num; i++) { /* * Another driver appears to send stop for each message, @@ -817,6 +841,7 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, dev->msg_index = i; ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1))); + if (ret != 0) { num = ret; break; @@ -845,6 +870,7 @@ stu300_probe(struct platform_device *pdev) struct resource *res; int bus_nr; int ret = 0; + char clk_name[] = "I2C0"; dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); if (!dev) { @@ -854,7 +880,8 @@ stu300_probe(struct platform_device *pdev) } bus_nr = pdev->id; - dev->clk = clk_get(&pdev->dev, NULL); + clk_name[3] += (char)bus_nr; + dev->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c index 1a9cc135219..b96f3025e58 100644 --- a/drivers/i2c/chips/tsl2550.c +++ b/drivers/i2c/chips/tsl2550.c @@ -27,7 +27,7 @@ #include <linux/delay.h> #define TSL2550_DRV_NAME "tsl2550" -#define DRIVER_VERSION "1.1.1" +#define DRIVER_VERSION "1.1.2" /* * Defines @@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) u8 r = 128; /* Avoid division by 0 and count 1 cannot be greater than count 0 */ - if (c0 && (c1 <= c0)) - r = c1 * 128 / c0; + if (c1 <= c0) + if (c0) { + r = c1 * 128 / c0; + + /* Calculate LUX */ + lux = ((c0 - c1) * ratio_lut[r]) / 256; + } else + lux = 0; else - return -1; - - /* Calculate LUX */ - lux = ((c0 - c1) * ratio_lut[r]) / 256; + return -EAGAIN; /* LUX range check */ return lux > TSL2550_MAX_LUX ? 
TSL2550_MAX_LUX : lux; diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c index bd066bb9d61..09f98ed0731 100644 --- a/drivers/ide/cs5520.c +++ b/drivers/ide/cs5520.c @@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); hw[0].irq = 14; + hw[1].irq = 15; return ide_host_add(d, hws, 2, NULL); } diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 77f79d26b26..c509c991646 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -92,6 +92,11 @@ int ide_acpi_init(void) return 0; } +bool ide_port_acpi(ide_hwif_t *hwif) +{ + return ide_noacpi == 0 && hwif->acpidata; +} + /** * ide_get_dev_handle - finds acpi_handle and PCI device.function * @dev: device to locate @@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive) unsigned long gtf_address; unsigned long obj_loc; - if (ide_noacpi) - return 0; - DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); ret = do_drive_get_GTF(drive, >f_length, >f_address, &obj_loc); @@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif) struct acpi_buffer output; union acpi_object *out_obj; - if (ide_noacpi) - return; - - DEBPRINT("ENTER:\n"); - - if (!hwif->acpidata) { - DEBPRINT("no ACPI data for %s\n", hwif->name); - return; - } - /* Setting up output buffer for _GTM */ output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ @@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif) struct ide_acpi_drive_link *master = &hwif->acpidata->master; struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; - if (ide_noacpi) - return; - - DEBPRINT("ENTER:\n"); - - if (!hwif->acpidata) { - DEBPRINT("no ACPI data for %s\n", hwif->name); - return; - } - /* Give the GTM buffer + drive Identify data to the channel via the * _STM method: */ /* setup input parameters buffer for _STM */ @@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on) ide_drive_t *drive; int i; - if (ide_noacpi || ide_noacpi_psx) + if (ide_noacpi_psx) return; DEBPRINT("ENTER:\n"); - if (!hwif->acpidata) { - DEBPRINT("no ACPI data for %s\n", hwif->name); - return; - } - /* channel first and then drives for power on and verse versa for power off */ if (on) acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); @@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif) drive->name, err); } - if (!ide_acpionboot) { + if (ide_noacpi || ide_acpionboot == 0) { DEBPRINT("ACPI methods disabled on boot\n"); return; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 4a19686fcfe..6a9a769bffc 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) } } else if (!blk_pc_request(rq)) { ide_cd_request_sense_fixup(drive, cmd); - /* complain if we still have data left to transfer */ + uptodate = cmd->nleft ? 0 : 1; - if (uptodate == 0) + + /* + * suck out the remaining bytes from the drive in an + * attempt to complete the data xfer. (see BZ#13399) + */ + if (!(stat & ATA_ERR) && !uptodate && thislen) { + ide_pio_bytes(drive, cmd, write, thislen); + uptodate = cmd->nleft ? 
0 : 1; + } + + if (!uptodate) rq->cmd_flags |= REQ_FAILED; } goto out_end; @@ -876,9 +886,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, return stat; /* - * Sanity check the given block size + * Sanity check the given block size, in so far as making + * sure the sectors_per_frame we give to the caller won't + * end up being bogus. */ blocklen = be32_to_cpu(capbuf.blocklen); + blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; switch (blocklen) { case 512: case 1024: @@ -886,10 +899,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, case 4096: break; default: - printk(KERN_ERR PFX "%s: weird block size %u\n", + printk_once(KERN_ERR PFX "%s: weird block size %u; " + "setting default block size to 2048\n", drive->name, blocklen); - printk(KERN_ERR PFX "%s: default to 2kb block size\n", - drive->name); blocklen = 2048; break; } diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 527908ff298..063b933d864 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = { PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), + PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c index 5bf958e5b1d..1099bf7cf96 100644 --- a/drivers/ide/ide-devsets.c +++ b/drivers/ide/ide-devsets.c @@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq) err = setfunc(drive, *(int *)&rq->cmd[1]); if (err) rq->errors = err; - ide_complete_rq(drive, err, ide_rq_bytes(rq)); + ide_complete_rq(drive, err, blk_rq_bytes(rq)); return ide_stopped; } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 695181120cd..7f878017b73 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -455,6 +455,7 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) rq->cmd_type = REQ_TYPE_ATA_TASKFILE; rq->special = cmd; + cmd->rq = rq; } ide_devset_get(multcount, mult_count); diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 219e6fb78dc..ee58c88dee5 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive) if (__ide_dma_bad_drive(drive)) return 0; - if (ide_id_dma_bug(drive)) - return 0; - if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) return config_drive_for_dma(drive); @@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive) return -1; } -int ide_id_dma_bug(ide_drive_t *drive) -{ - u16 *id = drive->id; - - if (id[ATA_ID_FIELD_VALID] & 4) { - if ((id[ATA_ID_UDMA_MODES] >> 8) && - (id[ATA_ID_MWDMA_MODES] >> 8)) - goto err_out; - } else if ((id[ATA_ID_MWDMA_MODES] >> 8) && - (id[ATA_ID_SWDMA_MODES] >> 8)) - goto err_out; - - return 0; -err_out: - printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name); - return 1; -} - int ide_set_dma(ide_drive_t *drive) { int rc; diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 2b914197961..e9abf2c3c33 100644 --- 
a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c @@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { if (err <= 0 && rq->errors == 0) rq->errors = -EIO; - ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq)); + ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); } } diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 8b3f204f7d7..fefbdfc8db0 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -293,7 +293,7 @@ out_end: drive->failed_pc = NULL; if (blk_fs_request(rq) == 0 && rq->errors == 0) rq->errors = -EIO; - ide_complete_rq(drive, -EIO, ide_rq_bytes(rq)); + ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); return ide_stopped; } diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 1059f809b80..db96138fefc 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) } } -/* obsolete, blk_rq_bytes() should be used instead */ -unsigned int ide_rq_bytes(struct request *rq) -{ - if (blk_pc_request(rq)) - return blk_rq_bytes(rq); - else - return blk_rq_cur_sectors(rq) << 9; -} -EXPORT_SYMBOL_GPL(ide_rq_bytes); - int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) { ide_hwif_t *hwif = drive->hwif; @@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq) if ((media == ide_floppy || media == ide_tape) && drv_req) { rq->errors = 0; - ide_complete_rq(drive, 0, blk_rq_bytes(rq)); } else { if (media == ide_tape) rq->errors = IDE_DRV_ERROR_GENERAL; else if (blk_fs_request(rq) == 0 && rq->errors == 0) rq->errors = -EIO; - ide_complete_rq(drive, -EIO, ide_rq_bytes(rq)); } + + ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); } static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 82f252c3ee6..e246d3d3fbc 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c @@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd, goto out; } - id = kmalloc(size, GFP_KERNEL); + /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. 
*/ + id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL); if (id == NULL) { rc = -ENOMEM; goto out; diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index fa047150a1c..2892b242bbe 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list); */ static const struct drive_list_entry ivb_list[] = { { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, + { "QUANTUM FIREBALLlct20 30" , "APL.0900" }, { "TSSTcorp CDDVDW SH-S202J" , "SB00" }, { "TSSTcorp CDDVDW SH-S202J" , "SB01" }, { "TSSTcorp CDDVDW SH-S202N" , "SB00" }, @@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive) kfree(id); - if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive)) - ide_dma_off(drive); - return 1; out_err: if (rc == 2) diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index c14ca144cff..ad7be2669dc 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c @@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) struct request_pm_state rqpm; int ret; - /* call ACPI _GTM only once */ - if ((drive->dn & 1) == 0 || pair == NULL) - ide_acpi_get_timing(hwif); + if (ide_port_acpi(hwif)) { + /* call ACPI _GTM only once */ + if ((drive->dn & 1) == 0 || pair == NULL) + ide_acpi_get_timing(hwif); + } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); @@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) ret = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); - /* call ACPI _PS3 only after both devices are suspended */ - if (ret == 0 && ((drive->dn & 1) || pair == NULL)) - ide_acpi_set_state(hwif, 0); + if (ret == 0 && ide_port_acpi(hwif)) { + /* call ACPI _PS3 only after both devices are suspended */ + if ((drive->dn & 1) || pair == NULL) + ide_acpi_set_state(hwif, 0); + } return ret; } @@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev) struct request_pm_state rqpm; int err; - /* call ACPI _PS0 / _STM only once */ - if ((drive->dn & 1) == 0 || pair == NULL) { - ide_acpi_set_state(hwif, 1); - ide_acpi_push_timing(hwif); - } + if (ide_port_acpi(hwif)) { + /* call ACPI _PS0 / _STM only once */ + if ((drive->dn & 1) == 0 || pair == NULL) { + ide_acpi_set_state(hwif, 1); + ide_acpi_push_timing(hwif); + } - ide_acpi_exec_tfs(drive); + ide_acpi_exec_tfs(drive); + } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 51af4eea0d3..1bb106f6221 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif) return j; } +static void ide_host_enable_irqs(struct ide_host *host) +{ + ide_hwif_t *hwif; + int i; + + ide_host_for_each_port(i, hwif, host) { + if (hwif == NULL) + continue; + + /* clear any pending IRQs */ + hwif->tp_ops->read_status(hwif); + + /* unmask IRQs */ + if (hwif->io_ports.ctl_addr) + hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); + } +} + /* * This routine sets up the IRQ for an IDE interface. 
*/ @@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif) if (irq_handler == NULL) irq_handler = ide_intr; - if (io_ports->ctl_addr) - hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); - if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) goto out_up; @@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, ide_port_tune_devices(hwif); } + ide_host_enable_irqs(host); + ide_host_for_each_port(i, hwif, host) { if (hwif == NULL) continue; diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 013dc595fab..bc5fb12b913 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1064,6 +1064,7 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, tape->best_dsc_rw_freq = config.dsc_rw_frequency; break; case 0x0350: + memset(&config, 0, sizeof(config)); config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; config.nr_stages = 1; if (copy_to_user(argp, &config, sizeof(config))) diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 83b734aec92..52b25f8b111 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud) } shost->hostdata[0] = (unsigned long)lu; + shost->max_cmd_len = SBP2_MAX_CDB_SIZE; if (!scsi_add_host(shost, &ud->device)) { lu->shost = shost; diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index c5036f1cc5b..64a3a66a8a3 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -25,6 +25,12 @@ #define SBP2_DEVICE_NAME "sbp2" /* + * There is no transport protocol limit to the CDB length, but we implement + * a fixed length only. 16 bytes is enough for disks larger than 2 TB. + */ +#define SBP2_MAX_CDB_SIZE 16 + +/* * SBP-2 specific definitions */ @@ -51,7 +57,7 @@ struct sbp2_command_orb { u32 data_descriptor_hi; u32 data_descriptor_lo; u32 misc; - u8 cdb[12]; + u8 cdb[SBP2_MAX_CDB_SIZE]; } __attribute__((packed)); #define SBP2_LOGIN_REQUEST 0x0 diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 114efd8dc8f..1148140d08a 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -608,8 +608,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, p, compat_mode); if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) - return str_to_user(dev_name(&evdev->dev), - _IOC_SIZE(cmd), p); + return str_to_user(dev->name, _IOC_SIZE(cmd), p); if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) return str_to_user(dev->phys, _IOC_SIZE(cmd), p); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 0e12f89276a..9a1d55b74d7 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -456,8 +456,11 @@ static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __user *argp) { struct input_dev *dev = joydev->handle.dev; + size_t len; int i, j; + const char *name; + /* Process fixed-sized commands. */ switch (cmd) { case JS_SET_CAL: @@ -499,9 +502,22 @@ static int joydev_ioctl_common(struct joydev *joydev, return copy_to_user(argp, joydev->corr, sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; - case JSIOCSAXMAP: - if (copy_from_user(joydev->abspam, argp, - sizeof(__u8) * (ABS_MAX + 1))) + } + + /* + * Process variable-sized commands (the axis and button map commands + * are considered variable-sized to decouple them from the values of + * ABS_MAX and KEY_MAX). 
+ */ + switch (cmd & ~IOCSIZE_MASK) { + + case (JSIOCSAXMAP & ~IOCSIZE_MASK): + len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam)); + /* + * FIXME: we should not copy into our axis map before + * validating the data. + */ + if (copy_from_user(joydev->abspam, argp, len)) return -EFAULT; for (i = 0; i < joydev->nabs; i++) { @@ -511,13 +527,17 @@ static int joydev_ioctl_common(struct joydev *joydev, } return 0; - case JSIOCGAXMAP: - return copy_to_user(argp, joydev->abspam, - sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; - - case JSIOCSBTNMAP: - if (copy_from_user(joydev->keypam, argp, - sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) + case (JSIOCGAXMAP & ~IOCSIZE_MASK): + len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam)); + return copy_to_user(argp, joydev->abspam, len) ? -EFAULT : 0; + + case (JSIOCSBTNMAP & ~IOCSIZE_MASK): + len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam)); + /* + * FIXME: we should not copy into our keymap before + * validating the data. + */ + if (copy_from_user(joydev->keypam, argp, len)) return -EFAULT; for (i = 0; i < joydev->nkey; i++) { @@ -529,25 +549,19 @@ static int joydev_ioctl_common(struct joydev *joydev, return 0; - case JSIOCGBTNMAP: - return copy_to_user(argp, joydev->keypam, - sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; + case (JSIOCGBTNMAP & ~IOCSIZE_MASK): + len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam)); + return copy_to_user(argp, joydev->keypam, len) ? -EFAULT : 0; - default: - if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) { - int len; - const char *name = dev_name(&dev->dev); - - if (!name) - return 0; - len = strlen(name) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - if (copy_to_user(argp, name, len)) - return -EFAULT; - return len; - } + case JSIOCGNAME(0): + name = dev->name; + if (!name) + return 0; + + len = min_t(size_t, _IOC_SIZE(cmd), strlen(name) + 1); + return copy_to_user(argp, name, len) ? -EFAULT : len; } + return -EINVAL; } diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index baabf830264..f6c688cae33 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c @@ -74,6 +74,7 @@ static struct iforce_device iforce_device[] = { { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //? { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? + { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 
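Back in the joydev hunks above, the point of matching on cmd & ~IOCSIZE_MASK is that the size field encoded in JSIOC[GS]AXMAP and JSIOC[GS]BTNMAP depends on the ABS_MAX/KEY_MAX values the calling program was compiled against, so the driver clamps to its own map size instead of trusting the caller. A small userspace sketch of that decoding (the 64-byte driver map is a stand-in, and it assumes the joystick uapi headers are installed):

#include <stdio.h>
#include <stddef.h>
#include <linux/ioctl.h>
#include <linux/joystick.h>

int main(void)
{
        unsigned int cmd = JSIOCGAXMAP;
        size_t driver_map = 64;          /* stand-in for sizeof(joydev->abspam) */
        size_t len = _IOC_SIZE(cmd);     /* whatever the caller's headers said */

        if (len > driver_map)            /* the kernel clamps to its own map */
                len = driver_map;

        printf("ioctl nr 0x%x, caller-encoded size %u, copied %zu bytes\n",
               _IOC_NR(cmd), _IOC_SIZE(cmd), len);
        printf("size-independent match key: 0x%x\n", cmd & ~IOCSIZE_MASK);
        return 0;
}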
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index f83185aeb51..9f289d8f52c 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -223,6 +223,7 @@ static struct usb_device_id iforce_usb_ids [] = { { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ + { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index b868b8d5fbb..f155ad8cdae 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -470,20 +470,20 @@ static void xpad_irq_out(struct urb *urb) status = urb->status; switch (status) { - case 0: + case 0: /* success */ - break; - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* this urb is terminated, clean up */ - dbg("%s - urb shutting down with status: %d", - __func__, status); - return; - default: - dbg("%s - nonzero urb status received: %d", - __func__, status); - goto exit; + return; + + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* this urb is terminated, clean up */ + dbg("%s - urb shutting down with status: %d", __func__, status); + return; + + default: + dbg("%s - nonzero urb status received: %d", __func__, status); + goto exit; } exit: diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 9d8f796c674..a6b989a9dc0 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -12,6 +12,42 @@ menuconfig INPUT_KEYBOARD if INPUT_KEYBOARD +config KEYBOARD_AAED2000 + tristate "AAED-2000 keyboard" + depends on MACH_AAED2000 + select INPUT_POLLDEV + default y + help + Say Y here to enable the keyboard on the Agilent AAED-2000 + development board. + + To compile this driver as a module, choose M here: the + module will be called aaed2000_kbd. + +config KEYBOARD_AMIGA + tristate "Amiga keyboard" + depends on AMIGA + help + Say Y here if you are running Linux on any AMIGA and have a keyboard + attached. + + To compile this driver as a module, choose M here: the + module will be called amikbd. + +config ATARI_KBD_CORE + bool + +config KEYBOARD_ATARI + tristate "Atari keyboard" + depends on ATARI + select ATARI_KBD_CORE + help + Say Y here if you are running Linux on any Atari and have a keyboard + attached. + + To compile this driver as a module, choose M here: the + module will be called atakbd. + config KEYBOARD_ATKBD tristate "AT keyboard" if EMBEDDED || !X86 default y @@ -68,69 +104,14 @@ config KEYBOARD_ATKBD_RDI_KEYCODES right-hand column will be interpreted as the key shown in the left-hand column. -config KEYBOARD_SUNKBD - tristate "Sun Type 4 and Type 5 keyboard" - select SERIO - help - Say Y here if you want to use a Sun Type 4 or Type 5 keyboard, - connected either to the Sun keyboard connector or to an serial - (RS-232) port via a simple adapter. - - To compile this driver as a module, choose M here: the - module will be called sunkbd. - -config KEYBOARD_LKKBD - tristate "DECstation/VAXstation LK201/LK401 keyboard" - select SERIO - help - Say Y here if you want to use a LK201 or LK401 style serial - keyboard. 
This keyboard is also useable on PCs if you attach - it with the inputattach program. The connector pinout is - described within lkkbd.c. - - To compile this driver as a module, choose M here: the - module will be called lkkbd. - -config KEYBOARD_LOCOMO - tristate "LoCoMo Keyboard Support" - depends on SHARP_LOCOMO && INPUT_KEYBOARD - help - Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA - - To compile this driver as a module, choose M here: the - module will be called locomokbd. - -config KEYBOARD_XTKBD - tristate "XT keyboard" - select SERIO - help - Say Y here if you want to use the old IBM PC/XT keyboard (or - compatible) on your system. This is only possible with a - parallel port keyboard adapter, you cannot connect it to the - keyboard port on a PC that runs Linux. - - To compile this driver as a module, choose M here: the - module will be called xtkbd. - -config KEYBOARD_NEWTON - tristate "Newton keyboard" - select SERIO - help - Say Y here if you have a Newton keyboard on a serial port. - - To compile this driver as a module, choose M here: the - module will be called newtonkbd. - -config KEYBOARD_STOWAWAY - tristate "Stowaway keyboard" - select SERIO +config KEYBOARD_BFIN + tristate "Blackfin BF54x keypad support" + depends on (BF54x && !BF544) help - Say Y here if you have a Stowaway keyboard on a serial port. - Stowaway compatible keyboards like Dicota Input-PDA keyboard - are also supported by this driver. + Say Y here if you want to use the BF54x keypad. To compile this driver as a module, choose M here: the - module will be called stowaway. + module will be called bf54x-keys. config KEYBOARD_CORGI tristate "Corgi keyboard" @@ -143,61 +124,50 @@ config KEYBOARD_CORGI To compile this driver as a module, choose M here: the module will be called corgikbd. -config KEYBOARD_SPITZ - tristate "Spitz keyboard" - depends on PXA_SHARPSL - default y +config KEYBOARD_LKKBD + tristate "DECstation/VAXstation LK201/LK401 keyboard" + select SERIO help - Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000, - SL-C3000 and Sl-C3100 series of PDAs. + Say Y here if you want to use a LK201 or LK401 style serial + keyboard. This keyboard is also useable on PCs if you attach + it with the inputattach program. The connector pinout is + described within lkkbd.c. To compile this driver as a module, choose M here: the - module will be called spitzkbd. + module will be called lkkbd. -config KEYBOARD_TOSA - tristate "Tosa keyboard" - depends on MACH_TOSA - default y +config KEYBOARD_EP93XX + tristate "EP93xx Matrix Keypad support" + depends on ARCH_EP93XX help - Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa) + Say Y here to enable the matrix keypad on the Cirrus EP93XX. To compile this driver as a module, choose M here: the - module will be called tosakbd. + module will be called ep93xx_keypad. -config KEYBOARD_TOSA_USE_EXT_KEYCODES - bool "Tosa keyboard: use extended keycodes" - depends on KEYBOARD_TOSA - default n +config KEYBOARD_GPIO + tristate "GPIO Buttons" + depends on GENERIC_GPIO help - Say Y here to enable the tosa keyboard driver to generate extended - (>= 127) keycodes. Be aware, that they can't be correctly interpreted - by either console keyboard driver or by Kdrive keybd driver. - - Say Y only if you know, what you are doing! + This driver implements support for buttons connected + to GPIO pins of various CPUs (and some other chips). 
-config KEYBOARD_AMIGA - tristate "Amiga keyboard" - depends on AMIGA - help - Say Y here if you are running Linux on any AMIGA and have a keyboard - attached. + Say Y here if your device has buttons connected + directly to such GPIO pins. Your board-specific + setup logic must also provide a platform device, + with configuration data saying which GPIOs are used. To compile this driver as a module, choose M here: the - module will be called amikbd. + module will be called gpio_keys. -config ATARI_KBD_CORE - bool - -config KEYBOARD_ATARI - tristate "Atari keyboard" - depends on ATARI - select ATARI_KBD_CORE +config KEYBOARD_MATRIX + tristate "GPIO driven matrix keypad support" + depends on GENERIC_GPIO help - Say Y here if you are running Linux on any Atari and have a keyboard - attached. + Enable support for GPIO driven matrix keypad. To compile this driver as a module, choose M here: the - module will be called atakbd. + module will be called matrix_keypad. config KEYBOARD_HIL_OLD tristate "HP HIL keyboard support (simple driver)" @@ -261,20 +231,39 @@ config KEYBOARD_LM8323 To compile this driver as a module, choose M here: the module will be called lm8323. -config KEYBOARD_OMAP - tristate "TI OMAP keypad support" - depends on (ARCH_OMAP1 || ARCH_OMAP2) +config KEYBOARD_LOCOMO + tristate "LoCoMo Keyboard Support" + depends on SHARP_LOCOMO help - Say Y here if you want to use the OMAP keypad. + Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA To compile this driver as a module, choose M here: the - module will be called omap-keypad. + module will be called locomokbd. + +config KEYBOARD_MAPLE + tristate "Maple bus keyboard" + depends on SH_DREAMCAST && MAPLE + help + Say Y here if you have a Dreamcast console running Linux and have + a keyboard attached to its Maple bus. + + To compile this driver as a module, choose M here: the + module will be called maple_keyb. + +config KEYBOARD_NEWTON + tristate "Newton keyboard" + select SERIO + help + Say Y here if you have a Newton keyboard on a serial port. + + To compile this driver as a module, choose M here: the + module will be called newtonkbd. config KEYBOARD_PXA27x tristate "PXA27x/PXA3xx keypad support" depends on PXA27x || PXA3xx help - Enable support for PXA27x/PXA3xx keypad controller + Enable support for PXA27x/PXA3xx keypad controller. To compile this driver as a module, choose M here: the module will be called pxa27x_keypad. @@ -288,51 +277,38 @@ config KEYBOARD_PXA930_ROTARY To compile this driver as a module, choose M here: the module will be called pxa930_rotary. -config KEYBOARD_AAED2000 - tristate "AAED-2000 keyboard" - depends on MACH_AAED2000 - select INPUT_POLLDEV +config KEYBOARD_SPITZ + tristate "Spitz keyboard" + depends on PXA_SHARPSL default y help - Say Y here to enable the keyboard on the Agilent AAED-2000 - development board. - - To compile this driver as a module, choose M here: the - module will be called aaed2000_kbd. - -config KEYBOARD_GPIO - tristate "GPIO Buttons" - depends on GENERIC_GPIO - help - This driver implements support for buttons connected - to GPIO pins of various CPUs (and some other chips). - - Say Y here if your device has buttons connected - directly to such GPIO pins. Your board-specific - setup logic must also provide a platform device, - with configuration data saying which GPIOs are used. + Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000, + SL-C3000 and Sl-C3100 series of PDAs. 
To compile this driver as a module, choose M here: the - module will be called gpio-keys. + module will be called spitzkbd. -config KEYBOARD_MAPLE - tristate "Maple bus keyboard" - depends on SH_DREAMCAST && MAPLE +config KEYBOARD_STOWAWAY + tristate "Stowaway keyboard" + select SERIO help - Say Y here if you have a Dreamcast console running Linux and have - a keyboard attached to its Maple bus. + Say Y here if you have a Stowaway keyboard on a serial port. + Stowaway compatible keyboards like Dicota Input-PDA keyboard + are also supported by this driver. To compile this driver as a module, choose M here: the - module will be called maple_keyb. + module will be called stowaway. -config KEYBOARD_BFIN - tristate "Blackfin BF54x keypad support" - depends on (BF54x && !BF544) +config KEYBOARD_SUNKBD + tristate "Sun Type 4 and Type 5 keyboard" + select SERIO help - Say Y here if you want to use the BF54x keypad. + Say Y here if you want to use a Sun Type 4 or Type 5 keyboard, + connected either to the Sun keyboard connector or to an serial + (RS-232) port via a simple adapter. To compile this driver as a module, choose M here: the - module will be called bf54x-keys. + module will be called sunkbd. config KEYBOARD_SH_KEYSC tristate "SuperH KEYSC keypad support" @@ -344,13 +320,45 @@ config KEYBOARD_SH_KEYSC To compile this driver as a module, choose M here: the module will be called sh_keysc. -config KEYBOARD_EP93XX - tristate "EP93xx Matrix Keypad support" - depends on ARCH_EP93XX +config KEYBOARD_OMAP + tristate "TI OMAP keypad support" + depends on (ARCH_OMAP1 || ARCH_OMAP2) help - Say Y here to enable the matrix keypad on the Cirrus EP93XX. + Say Y here if you want to use the OMAP keypad. To compile this driver as a module, choose M here: the - module will be called ep93xx_keypad. + module will be called omap-keypad. + +config KEYBOARD_TOSA + tristate "Tosa keyboard" + depends on MACH_TOSA + default y + help + Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa) + + To compile this driver as a module, choose M here: the + module will be called tosakbd. + +config KEYBOARD_TOSA_USE_EXT_KEYCODES + bool "Tosa keyboard: use extended keycodes" + depends on KEYBOARD_TOSA + help + Say Y here to enable the tosa keyboard driver to generate extended + (>= 127) keycodes. Be aware, that they can't be correctly interpreted + by either console keyboard driver or by Kdrive keybd driver. + + Say Y only if you know, what you are doing! + +config KEYBOARD_XTKBD + tristate "XT keyboard" + select SERIO + help + Say Y here if you want to use the old IBM PC/XT keyboard (or + compatible) on your system. This is only possible with a + parallel port keyboard adapter, you cannot connect it to the + keyboard port on a PC that runs Linux. + + To compile this driver as a module, choose M here: the + module will be called xtkbd. endif diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 156b647a259..b5b5eae9724 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -4,29 +4,30 @@ # Each configuration option enables a list of files. 
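As the KEYBOARD_GPIO help text above says, the driver learns which lines carry buttons from platform data registered by board code. A hedged sketch of what such a board file looked like with this generation of the API (the GPIO number, key code and debounce value are invented for illustration):

#include <linux/kernel.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/platform_device.h>

/* GPIO number, key code and intervals are invented for illustration. */
static struct gpio_keys_button board_buttons[] = {
        {
                .code              = KEY_POWER,
                .gpio              = 42,
                .active_low        = 1,
                .desc              = "power",
                .wakeup            = 1,
                .debounce_interval = 20,        /* ms */
        },
};

static struct gpio_keys_platform_data board_button_data = {
        .buttons  = board_buttons,
        .nbuttons = ARRAY_SIZE(board_buttons),
};

static struct platform_device board_button_device = {
        .name = "gpio-keys",
        .id   = -1,
        .dev  = {
                .platform_data = &board_button_data,
        },
};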
-obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o -obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o -obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o -obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o +obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o -obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o -obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o -obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o +obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o +obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o -obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o -obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o +obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o +obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o +obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o +obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o +obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o +obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o +obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o +obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o +obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o -obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o -obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o -obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o -obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o -obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o -obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o -obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o +obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o +obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o +obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o +obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o +obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index df3f8aa6811..6c6a09b1c0f 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = { }; /* + * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate + * release for their volume buttons + */ +static unsigned int atkbd_hp_r4000_forced_release_keys[] = { + 0xae, 0xb0, -1U +}; + +/* * Samsung NC10,NC20 with Fn+F? 
key release not working */ static unsigned int atkbd_samsung_forced_release_keys[] = { @@ -895,6 +903,13 @@ static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = { }; /* + * Amilo Pi 3525 key release for Fn+Volume keys not working + */ +static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = { + 0x20, 0xa0, 0x2e, 0xae, 0x30, 0xb0, -1U +}; + +/* * Amilo Xi 3650 key release for light touch bar not working */ static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = { @@ -902,6 +917,13 @@ static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = { }; /* + * Soltech TA12 system with broken key release on volume keys and mute key + */ +static unsigned int atkdb_soltech_ta12_forced_release_keys[] = { + 0xa0, 0xae, 0xb0, -1U +}; + +/* * atkbd_set_keycode_table() initializes keyboard's keycode table * according to the selected scancode set */ @@ -1523,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { .driver_data = atkbd_hp_zv6100_forced_release_keys, }, { + .ident = "HP Presario R4000", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_hp_r4000_forced_release_keys, + }, + { + .ident = "HP Presario R4100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_hp_r4000_forced_release_keys, + }, + { + .ident = "HP Presario R4200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_hp_r4000_forced_release_keys, + }, + { .ident = "Inventec Symphony", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), @@ -1568,6 +1617,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { .driver_data = atkbd_amilo_pa1510_forced_release_keys, }, { + .ident = "Fujitsu Amilo Pi 3525", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 3525"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_amilo_pi3525_forced_release_keys, + }, + { .ident = "Fujitsu Amilo Xi 3650", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), @@ -1576,6 +1634,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { .callback = atkbd_setup_forced_release, .driver_data = atkbd_amilo_xi3650_forced_release_keys, }, + { + .ident = "Soltech Corporation TA12", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Soltech Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "TA12"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkdb_soltech_ta12_forced_release_keys, + }, { } }; diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 2157cd7de00..efed0c9e242 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -29,7 +29,8 @@ struct gpio_button_data { struct gpio_keys_button *button; struct input_dev *input; - struct delayed_work work; + struct timer_list timer; + struct work_struct work; }; struct gpio_keys_drvdata { @@ -40,7 +41,7 @@ struct gpio_keys_drvdata { static void gpio_keys_report_event(struct work_struct *work) { struct gpio_button_data *bdata = - container_of(work, struct gpio_button_data, work.work); + container_of(work, struct gpio_button_data, work); struct gpio_keys_button *button = bdata->button; struct input_dev 
*input = bdata->input; unsigned int type = button->type ?: EV_KEY; @@ -50,17 +51,25 @@ static void gpio_keys_report_event(struct work_struct *work) input_sync(input); } +static void gpio_keys_timer(unsigned long _data) +{ + struct gpio_button_data *data = (struct gpio_button_data *)_data; + + schedule_work(&data->work); +} + static irqreturn_t gpio_keys_isr(int irq, void *dev_id) { struct gpio_button_data *bdata = dev_id; struct gpio_keys_button *button = bdata->button; - unsigned long delay; BUG_ON(irq != gpio_to_irq(button->gpio)); - delay = button->debounce_interval ? - msecs_to_jiffies(button->debounce_interval) : 0; - schedule_delayed_work(&bdata->work, delay); + if (button->debounce_interval) + mod_timer(&bdata->timer, + jiffies + msecs_to_jiffies(button->debounce_interval)); + else + schedule_work(&bdata->work); return IRQ_HANDLED; } @@ -107,7 +116,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) bdata->input = input; bdata->button = button; - INIT_DELAYED_WORK(&bdata->work, gpio_keys_report_event); + setup_timer(&bdata->timer, + gpio_keys_timer, (unsigned long)bdata); + INIT_WORK(&bdata->work, gpio_keys_report_event); error = gpio_request(button->gpio, button->desc ?: "gpio_keys"); if (error < 0) { @@ -166,7 +177,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) fail2: while (--i >= 0) { free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]); - cancel_delayed_work_sync(&ddata->data[i].work); + if (pdata->buttons[i].debounce_interval) + del_timer_sync(&ddata->data[i].timer); + cancel_work_sync(&ddata->data[i].work); gpio_free(pdata->buttons[i].gpio); } @@ -190,7 +203,9 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev) for (i = 0; i < pdata->nbuttons; i++) { int irq = gpio_to_irq(pdata->buttons[i].gpio); free_irq(irq, &ddata->data[i]); - cancel_delayed_work_sync(&ddata->data[i].work); + if (pdata->buttons[i].debounce_interval) + del_timer_sync(&ddata->data[i].timer); + cancel_work_sync(&ddata->data[i].work); gpio_free(pdata->buttons[i].gpio); } diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c new file mode 100644 index 00000000000..541b981ff07 --- /dev/null +++ b/drivers/input/keyboard/matrix_keypad.c @@ -0,0 +1,453 @@ +/* + * GPIO driven matrix keyboard driver + * + * Copyright (c) 2008 Marek Vasut <marek.vasut@gmail.com> + * + * Based on corgikbd.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/input.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/input/matrix_keypad.h> + +struct matrix_keypad { + const struct matrix_keypad_platform_data *pdata; + struct input_dev *input_dev; + unsigned short *keycodes; + unsigned int row_shift; + + uint32_t last_key_state[MATRIX_MAX_COLS]; + struct delayed_work work; + bool scan_pending; + bool stopped; + spinlock_t lock; +}; + +/* + * NOTE: normally the GPIO has to be put into HiZ when de-activated to cause + * minmal side effect when scanning other columns, here it is configured to + * be input, and it should work on most platforms. 
+ */ +static void __activate_col(const struct matrix_keypad_platform_data *pdata, + int col, bool on) +{ + bool level_on = !pdata->active_low; + + if (on) { + gpio_direction_output(pdata->col_gpios[col], level_on); + } else { + gpio_set_value_cansleep(pdata->col_gpios[col], !level_on); + gpio_direction_input(pdata->col_gpios[col]); + } +} + +static void activate_col(const struct matrix_keypad_platform_data *pdata, + int col, bool on) +{ + __activate_col(pdata, col, on); + + if (on && pdata->col_scan_delay_us) + udelay(pdata->col_scan_delay_us); +} + +static void activate_all_cols(const struct matrix_keypad_platform_data *pdata, + bool on) +{ + int col; + + for (col = 0; col < pdata->num_col_gpios; col++) + __activate_col(pdata, col, on); +} + +static bool row_asserted(const struct matrix_keypad_platform_data *pdata, + int row) +{ + return gpio_get_value_cansleep(pdata->row_gpios[row]) ? + !pdata->active_low : pdata->active_low; +} + +static void enable_row_irqs(struct matrix_keypad *keypad) +{ + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i; + + for (i = 0; i < pdata->num_row_gpios; i++) + enable_irq(gpio_to_irq(pdata->row_gpios[i])); +} + +static void disable_row_irqs(struct matrix_keypad *keypad) +{ + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i; + + for (i = 0; i < pdata->num_row_gpios; i++) + disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i])); +} + +/* + * This gets the keys from keyboard and reports it to input subsystem + */ +static void matrix_keypad_scan(struct work_struct *work) +{ + struct matrix_keypad *keypad = + container_of(work, struct matrix_keypad, work.work); + struct input_dev *input_dev = keypad->input_dev; + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + uint32_t new_state[MATRIX_MAX_COLS]; + int row, col, code; + + /* de-activate all columns for scanning */ + activate_all_cols(pdata, false); + + memset(new_state, 0, sizeof(new_state)); + + /* assert each column and read the row status out */ + for (col = 0; col < pdata->num_col_gpios; col++) { + + activate_col(pdata, col, true); + + for (row = 0; row < pdata->num_row_gpios; row++) + new_state[col] |= + row_asserted(pdata, row) ? (1 << row) : 0; + + activate_col(pdata, col, false); + } + + for (col = 0; col < pdata->num_col_gpios; col++) { + uint32_t bits_changed; + + bits_changed = keypad->last_key_state[col] ^ new_state[col]; + if (bits_changed == 0) + continue; + + for (row = 0; row < pdata->num_row_gpios; row++) { + if ((bits_changed & (1 << row)) == 0) + continue; + + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + input_event(input_dev, EV_MSC, MSC_SCAN, code); + input_report_key(input_dev, + keypad->keycodes[code], + new_state[col] & (1 << row)); + } + } + input_sync(input_dev); + + memcpy(keypad->last_key_state, new_state, sizeof(new_state)); + + activate_all_cols(pdata, true); + + /* Enable IRQs again */ + spin_lock_irq(&keypad->lock); + keypad->scan_pending = false; + enable_row_irqs(keypad); + spin_unlock_irq(&keypad->lock); +} + +static irqreturn_t matrix_keypad_interrupt(int irq, void *id) +{ + struct matrix_keypad *keypad = id; + unsigned long flags; + + spin_lock_irqsave(&keypad->lock, flags); + + /* + * See if another IRQ beaten us to it and scheduled the + * scan already. In that case we should not try to + * disable IRQs again. 
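Stripped of the locking and GPIO plumbing, matrix_keypad_scan() above boils down to: strobe one column, sample the rows, then report only the bits that changed since the last pass. A runnable userspace model of just that core (read_rows() stands in for the per-column GPIO reads; the 4x3 geometry is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define NUM_ROWS  4
#define NUM_COLS  3
#define ROW_SHIFT 2                       /* get_count_order(NUM_COLS) */
#define SCAN_CODE(row, col) (((row) << ROW_SHIFT) + (col))

static uint32_t read_rows(int col)
{
        /* Pretend the key at row 1, column 2 is held down. */
        return (col == 2) ? (1u << 1) : 0;
}

int main(void)
{
        static uint32_t last_state[NUM_COLS];  /* all released initially */
        uint32_t new_state[NUM_COLS];
        int row, col;

        for (col = 0; col < NUM_COLS; col++)
                new_state[col] = read_rows(col);   /* column strobed here */

        for (col = 0; col < NUM_COLS; col++) {
                uint32_t changed = last_state[col] ^ new_state[col];

                for (row = 0; row < NUM_ROWS; row++) {
                        if (!(changed & (1u << row)))
                                continue;
                        printf("scan code %d is now %s\n",
                               SCAN_CODE(row, col),
                               (new_state[col] & (1u << row)) ? "down" : "up");
                }
                last_state[col] = new_state[col];
        }
        return 0;
}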
+ */ + if (unlikely(keypad->scan_pending || keypad->stopped)) + goto out; + + disable_row_irqs(keypad); + keypad->scan_pending = true; + schedule_delayed_work(&keypad->work, + msecs_to_jiffies(keypad->pdata->debounce_ms)); + +out: + spin_unlock_irqrestore(&keypad->lock, flags); + return IRQ_HANDLED; +} + +static int matrix_keypad_start(struct input_dev *dev) +{ + struct matrix_keypad *keypad = input_get_drvdata(dev); + + keypad->stopped = false; + mb(); + + /* + * Schedule an immediate key scan to capture current key state; + * columns will be activated and IRQs be enabled after the scan. + */ + schedule_delayed_work(&keypad->work, 0); + + return 0; +} + +static void matrix_keypad_stop(struct input_dev *dev) +{ + struct matrix_keypad *keypad = input_get_drvdata(dev); + + keypad->stopped = true; + mb(); + flush_work(&keypad->work.work); + /* + * matrix_keypad_scan() will leave IRQs enabled; + * we should disable them now. + */ + disable_row_irqs(keypad); +} + +#ifdef CONFIG_PM +static int matrix_keypad_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct matrix_keypad *keypad = platform_get_drvdata(pdev); + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i; + + matrix_keypad_stop(keypad->input_dev); + + if (device_may_wakeup(&pdev->dev)) + for (i = 0; i < pdata->num_row_gpios; i++) + enable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); + + return 0; +} + +static int matrix_keypad_resume(struct platform_device *pdev) +{ + struct matrix_keypad *keypad = platform_get_drvdata(pdev); + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i; + + if (device_may_wakeup(&pdev->dev)) + for (i = 0; i < pdata->num_row_gpios; i++) + disable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); + + matrix_keypad_start(keypad->input_dev); + + return 0; +} +#else +#define matrix_keypad_suspend NULL +#define matrix_keypad_resume NULL +#endif + +static int __devinit init_matrix_gpio(struct platform_device *pdev, + struct matrix_keypad *keypad) +{ + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i, err = -EINVAL; + + /* initialized strobe lines as outputs, activated */ + for (i = 0; i < pdata->num_col_gpios; i++) { + err = gpio_request(pdata->col_gpios[i], "matrix_kbd_col"); + if (err) { + dev_err(&pdev->dev, + "failed to request GPIO%d for COL%d\n", + pdata->col_gpios[i], i); + goto err_free_cols; + } + + gpio_direction_output(pdata->col_gpios[i], !pdata->active_low); + } + + for (i = 0; i < pdata->num_row_gpios; i++) { + err = gpio_request(pdata->row_gpios[i], "matrix_kbd_row"); + if (err) { + dev_err(&pdev->dev, + "failed to request GPIO%d for ROW%d\n", + pdata->row_gpios[i], i); + goto err_free_rows; + } + + gpio_direction_input(pdata->row_gpios[i]); + } + + for (i = 0; i < pdata->num_row_gpios; i++) { + err = request_irq(gpio_to_irq(pdata->row_gpios[i]), + matrix_keypad_interrupt, + IRQF_DISABLED | + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "matrix-keypad", keypad); + if (err) { + dev_err(&pdev->dev, + "Unable to acquire interrupt for GPIO line %i\n", + pdata->row_gpios[i]); + goto err_free_irqs; + } + } + + /* initialized as disabled - enabled by input->open */ + disable_row_irqs(keypad); + return 0; + +err_free_irqs: + while (--i >= 0) + free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad); + i = pdata->num_row_gpios; +err_free_rows: + while (--i >= 0) + gpio_free(pdata->row_gpios[i]); + i = pdata->num_col_gpios; +err_free_cols: + while (--i >= 0) + gpio_free(pdata->col_gpios[i]); + + return err; +} + +static int __devinit 
matrix_keypad_probe(struct platform_device *pdev) +{ + const struct matrix_keypad_platform_data *pdata; + const struct matrix_keymap_data *keymap_data; + struct matrix_keypad *keypad; + struct input_dev *input_dev; + unsigned short *keycodes; + unsigned int row_shift; + int i; + int err; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data defined\n"); + return -EINVAL; + } + + keymap_data = pdata->keymap_data; + if (!keymap_data) { + dev_err(&pdev->dev, "no keymap data defined\n"); + return -EINVAL; + } + + row_shift = get_count_order(pdata->num_col_gpios); + + keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL); + keycodes = kzalloc((pdata->num_row_gpios << row_shift) * + sizeof(*keycodes), + GFP_KERNEL); + input_dev = input_allocate_device(); + if (!keypad || !keycodes || !input_dev) { + err = -ENOMEM; + goto err_free_mem; + } + + keypad->input_dev = input_dev; + keypad->pdata = pdata; + keypad->keycodes = keycodes; + keypad->row_shift = row_shift; + keypad->stopped = true; + INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan); + spin_lock_init(&keypad->lock); + + input_dev->name = pdev->name; + input_dev->id.bustype = BUS_HOST; + input_dev->dev.parent = &pdev->dev; + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + input_dev->open = matrix_keypad_start; + input_dev->close = matrix_keypad_stop; + + input_dev->keycode = keycodes; + input_dev->keycodesize = sizeof(*keycodes); + input_dev->keycodemax = pdata->num_row_gpios << keypad->row_shift; + + for (i = 0; i < keymap_data->keymap_size; i++) { + unsigned int key = keymap_data->keymap[i]; + unsigned int row = KEY_ROW(key); + unsigned int col = KEY_COL(key); + unsigned short code = KEY_VAL(key); + + keycodes[MATRIX_SCAN_CODE(row, col, row_shift)] = code; + __set_bit(code, input_dev->keybit); + } + __clear_bit(KEY_RESERVED, input_dev->keybit); + + input_set_capability(input_dev, EV_MSC, MSC_SCAN); + input_set_drvdata(input_dev, keypad); + + err = init_matrix_gpio(pdev, keypad); + if (err) + goto err_free_mem; + + err = input_register_device(keypad->input_dev); + if (err) + goto err_free_mem; + + device_init_wakeup(&pdev->dev, pdata->wakeup); + platform_set_drvdata(pdev, keypad); + + return 0; + +err_free_mem: + input_free_device(input_dev); + kfree(keycodes); + kfree(keypad); + return err; +} + +static int __devexit matrix_keypad_remove(struct platform_device *pdev) +{ + struct matrix_keypad *keypad = platform_get_drvdata(pdev); + const struct matrix_keypad_platform_data *pdata = keypad->pdata; + int i; + + device_init_wakeup(&pdev->dev, 0); + + for (i = 0; i < pdata->num_row_gpios; i++) { + free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad); + gpio_free(pdata->row_gpios[i]); + } + + for (i = 0; i < pdata->num_col_gpios; i++) + gpio_free(pdata->col_gpios[i]); + + input_unregister_device(keypad->input_dev); + platform_set_drvdata(pdev, NULL); + kfree(keypad->keycodes); + kfree(keypad); + + return 0; +} + +static struct platform_driver matrix_keypad_driver = { + .probe = matrix_keypad_probe, + .remove = __devexit_p(matrix_keypad_remove), + .suspend = matrix_keypad_suspend, + .resume = matrix_keypad_resume, + .driver = { + .name = "matrix-keypad", + .owner = THIS_MODULE, + }, +}; + +static int __init matrix_keypad_init(void) +{ + return platform_driver_register(&matrix_keypad_driver); +} + +static void __exit matrix_keypad_exit(void) +{ + platform_driver_unregister(&matrix_keypad_driver); +} + +module_init(matrix_keypad_init); +module_exit(matrix_keypad_exit); + +MODULE_AUTHOR("Marek 
Vasut <marek.vasut@gmail.com>"); +MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:matrix-keypad"); diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c index 2adf9cb265d..d114d3a9e1e 100644 --- a/drivers/input/misc/cobalt_btns.c +++ b/drivers/input/misc/cobalt_btns.c @@ -1,7 +1,7 @@ /* * Cobalt button interface driver. * - * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -148,7 +148,7 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev) return 0; } -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_DESCRIPTION("Cobalt button interface driver"); MODULE_LICENSE("GPL"); /* work with hotplug and coldplug */ diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 6d67af5387a..21cb755a54f 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -114,7 +114,7 @@ static int __devexit pcspkr_remove(struct platform_device *dev) return 0; } -static int pcspkr_suspend(struct platform_device *dev, pm_message_t state) +static int pcspkr_suspend(struct device *dev) { pcspkr_event(NULL, EV_SND, SND_BELL, 0); @@ -127,14 +127,18 @@ static void pcspkr_shutdown(struct platform_device *dev) pcspkr_event(NULL, EV_SND, SND_BELL, 0); } +static struct dev_pm_ops pcspkr_pm_ops = { + .suspend = pcspkr_suspend, +}; + static struct platform_driver pcspkr_platform_driver = { .driver = { .name = "pcspkr", .owner = THIS_MODULE, + .pm = &pcspkr_pm_ops, }, .probe = pcspkr_probe, .remove = __devexit_p(pcspkr_remove), - .suspend = pcspkr_suspend, .shutdown = pcspkr_shutdown, }; diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 7c8957dd22c..27ee976eb54 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -611,6 +611,20 @@ static struct key_entry keymap_wistron_generic[] __initdata = { { KE_END, 0 } }; +static struct key_entry keymap_prestigio[] __initdata = { + { KE_KEY, 0x11, {KEY_PROG1} }, + { KE_KEY, 0x12, {KEY_PROG2} }, + { KE_WIFI, 0x30 }, + { KE_KEY, 0x22, {KEY_REWIND} }, + { KE_KEY, 0x23, {KEY_FORWARD} }, + { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, + { KE_KEY, 0x25, {KEY_STOPCD} }, + { KE_KEY, 0x31, {KEY_MAIL} }, + { KE_KEY, 0x36, {KEY_WWW} }, + { KE_END, 0 } +}; + + /* * If your machine is not here (which is currently rather likely), please send * a list of buttons and their key codes (reported when loading this module @@ -646,6 +660,15 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, { .callback = dmi_matched, + .ident = "Maxdata Pro 7000 DX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MAXDATA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pro 7000"), + }, + .driver_data = keymap_fs_amilo_pro_v2000 + }, + { + .callback = dmi_matched, .ident = "Fujitsu N3510", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), @@ -962,6 +985,8 @@ static int __init select_keymap(void) if (keymap_name != NULL) { if (strcmp (keymap_name, "1557/MS2141") == 0) keymap = keymap_wistron_ms2141; + else if (strcmp (keymap_name, "prestigio") == 0) + keymap = keymap_prestigio; else if (strcmp (keymap_name, "generic") == 0) keymap = keymap_wistron_generic; else { diff --git a/drivers/input/mouse/gpio_mouse.c 
b/drivers/input/mouse/gpio_mouse.c index 5e5eb88d8d1..7b6ce178f1b 100644 --- a/drivers/input/mouse/gpio_mouse.c +++ b/drivers/input/mouse/gpio_mouse.c @@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev) input_sync(input); } -static int __init gpio_mouse_probe(struct platform_device *pdev) +static int __devinit gpio_mouse_probe(struct platform_device *pdev) { struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data; struct input_polled_dev *input_poll; @@ -170,10 +170,8 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev) return 0; } -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:gpio_mouse"); - static struct platform_driver gpio_mouse_device_driver = { + .probe = gpio_mouse_probe, .remove = __devexit_p(gpio_mouse_remove), .driver = { .name = "gpio_mouse", @@ -183,8 +181,7 @@ static struct platform_driver gpio_mouse_device_driver = { static int __init gpio_mouse_init(void) { - return platform_driver_probe(&gpio_mouse_device_driver, - gpio_mouse_probe); + return platform_driver_register(&gpio_mouse_device_driver); } module_init(gpio_mouse_init); @@ -197,3 +194,5 @@ module_exit(gpio_mouse_exit); MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); MODULE_DESCRIPTION("GPIO mouse driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */ + diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index b587e2d576a..820e51673b2 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) priv->tseq[3] = 0; if (mlc->opacket & HIL_CTRL_APE) { priv->tseq[3] |= HP_SDC_LPC_APE_IPF; - down_trylock(&mlc->csem); + BUG_ON(down_trylock(&mlc->csem)); } enqueue: hp_sdc_enqueue_transaction(&priv->trans); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index fb8a3cd3ffd..ccbf23ece8e 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -78,6 +78,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { }, }, { + .ident = "ASUS G1S", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_BOARD_NAME, "G1S"), + DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + }, + }, + { /* AUX LOOP command does not raise AUX IRQ */ .ident = "ASUS P65UP5", .matches = { @@ -374,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), }, }, + { + .ident = "Acer Aspire 5536", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + }, + }, { } }; @@ -392,6 +408,34 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), }, }, + { + .ident = "Acer Aspire One 150", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), + }, + }, + { + .ident = "Advent 4211", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), + }, + }, + { + .ident = "Medion Akoya Mini E1210", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E1210"), + }, + }, + { + .ident = "Mivvy M310", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), + DMI_MATCH(DMI_PRODUCT_NAME, "N10"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 
f919bf57293..582245c497e 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -934,10 +934,11 @@ static bool i8042_suspended; static int i8042_suspend(struct platform_device *dev, pm_message_t state) { - if (!i8042_suspended && state.event == PM_EVENT_SUSPEND) { + if (!i8042_suspended && state.event == PM_EVENT_SUSPEND) i8042_controller_reset(); - i8042_suspended = true; - } + + i8042_suspended = state.event == PM_EVENT_SUSPEND || + state.event == PM_EVENT_FREEZE; return 0; } diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index fb17573f8f2..d66f4944f2a 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -935,10 +935,11 @@ static int serio_suspend(struct device *dev, pm_message_t state) { struct serio *serio = to_serio_port(dev); - if (!serio->suspended && state.event == PM_EVENT_SUSPEND) { + if (!serio->suspended && state.event == PM_EVENT_SUSPEND) serio_cleanup(serio); - serio->suspended = true; - } + + serio->suspended = state.event == PM_EVENT_SUSPEND || + state.event == PM_EVENT_FREEZE; return 0; } diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index a9d5031b855..ea30c983a33 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -388,6 +388,32 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi return result; } +static int wacom_query_tablet_data(struct usb_interface *intf) +{ + unsigned char *rep_data; + int limit = 0; + int error; + + rep_data = kmalloc(2, GFP_KERNEL); + if (!rep_data) + return -ENOMEM; + + do { + rep_data[0] = 2; + rep_data[1] = 2; + error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, + 2, rep_data, 2); + if (error >= 0) + error = usb_get_report(intf, + WAC_HID_FEATURE_REPORT, 2, + rep_data, 2); + } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); + + kfree(rep_data); + + return error < 0 ? error : 0; +} + static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); @@ -398,7 +424,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i struct wacom_features *features; struct input_dev *input_dev; int error = -ENOMEM; - char rep_data[2], limit = 0; struct hid_descriptor *hid_desc; wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); @@ -489,20 +514,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i /* * Ask the tablet to report tablet data if it is not a Tablet PC. - * Repeat until it succeeds + * Note that if query fails it is not a hard failure. 
*/ - if (wacom_wac->features->type != TABLETPC) { - do { - rep_data[0] = 2; - rep_data[1] = 2; - error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, - 2, rep_data, 2); - if (error >= 0) - error = usb_get_report(intf, - WAC_HID_FEATURE_REPORT, 2, - rep_data, 2); - } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); - } + if (wacom_wac->features->type != TABLETPC) + wacom_query_tablet_data(intf); usb_set_intfdata(intf, wacom); return 0; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 38bf86384ae..c896d6a21b7 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -384,6 +384,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) wacom_report_key(wcombo, BTN_STYLUS2, 0); wacom_report_key(wcombo, BTN_TOUCH, 0); wacom_report_abs(wcombo, ABS_WHEEL, 0); + if (wacom->features->type >= INTUOS3S) + wacom_report_abs(wcombo, ABS_Z, 0); } wacom_report_key(wcombo, wacom->tool[idx], 0); wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ @@ -836,6 +838,7 @@ static struct wacom_features wacom_features[] = { { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL }, { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL }, { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL }, + { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL }, { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU }, { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS }, { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, @@ -897,8 +900,9 @@ static struct usb_device_id wacom_ids[] = { { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x37) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x38) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x39) }, - { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC4) }, + { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) }, + { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC2) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x03) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x41) }, { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x42) }, diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index 6954f550010..3a7a58222f8 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -170,11 +170,11 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb) ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); - if (isr & UCB_IE_TSPX) { + if (isr & UCB_IE_TSPX) ucb1400_ts_irq_disable(ucb->ac97); - enable_irq(ucb->irq); - } else - printk(KERN_ERR "ucb1400: unexpected IE_STATUS = %#x\n", isr); + else + dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr); + enable_irq(ucb->irq); } static int ucb1400_ts_thread(void *_ucb) @@ -345,6 +345,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb) static int ucb1400_ts_probe(struct platform_device *dev) { int error, x_res, y_res; + u16 fcsr; struct ucb1400_ts *ucb = dev->dev.platform_data; ucb->ts_idev = input_allocate_device(); @@ -382,6 +383,14 @@ static int ucb1400_ts_probe(struct platform_device *dev) ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + /* + * Enable ADC filter to prevent horrible jitter on Colibri. + * This also further reduces jitter on boards where ADCSYNC + * pin is connected. 
+ */ + fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR); + ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE); + ucb1400_adc_enable(ucb->ac97); x_res = ucb1400_ts_read_xres(ucb); y_res = ucb1400_ts_read_yres(ucb); diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index ec5169604a6..2d91049571a 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -294,32 +294,33 @@ struct reply_t gigaset_tab_cid[] = {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, - {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ + {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, {RSP_OK, 607,607, -1, 608,-1}, - //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, - {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}}, - {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}}, - {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}}, {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, - /* dialing */ - {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}}, - {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}}, - {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */ - - /* connection established */ - {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 - {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 - - {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout + /* optional dialing responses */ + {EV_BC_OPEN, 650,650, -1, 651,-1}, + {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}}, + {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, + + /* connect */ + {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, + {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, + ACT_NOTIFY_BC_UP}}, + {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, + {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, + ACT_NOTIFY_BC_UP}}, + {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, /* remote hangup */ - {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, - {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, + {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, + {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, /* hangup */ @@ -358,7 +359,8 @@ struct reply_t gigaset_tab_cid[] = {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, - {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}}, + {EV_BC_OPEN, 750,750, -1, 751,-1}, + {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}}, /* B channel closed (general case) */ {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME @@ -876,12 +878,6 @@ static void bchannel_down(struct bc_state *bcs) static void bchannel_up(struct bc_state *bcs) { - if (!(bcs->chstate & CHS_D_UP)) { - dev_notice(bcs->cs->dev, "%s: D channel not up\n", __func__); - bcs->chstate |= CHS_D_UP; - gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); - } - if (bcs->chstate & CHS_B_UP) { dev_notice(bcs->cs->dev, "%s: B channel already up\n", __func__); diff --git a/drivers/isdn/gigaset/interface.c 
b/drivers/isdn/gigaset/interface.c index 1ebfcab7466..8ff7e35c706 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -408,6 +408,8 @@ static int if_write_room(struct tty_struct *tty) return retval; } +/* FIXME: This function does not have error returns */ + static int if_chars_in_buffer(struct tty_struct *tty) { struct cardstate *cs; diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index db3a1e4cd48..bed38fcc432 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -174,12 +174,6 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) pr_err("invalid size %d\n", size); return -EINVAL; } - src = iwb->read; - if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD || - (read < src && limit >= src))) { - pr_err("isoc write buffer frame reservation violated\n"); - return -EFAULT; - } #endif if (read < write) { diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 8df889b0c1a..9de54202c90 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -37,7 +37,6 @@ #include <linux/kernel_stat.h> #include <linux/usb.h> #include <linux/kernel.h> -#include <linux/smp_lock.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include "hisax.h" diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index b4d4522e507..2881a66c1aa 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -13,6 +13,7 @@ #include <linux/isdn.h> #include <linux/delay.h> +#include <linux/smp_lock.h> #include "isdn_common.h" #include "isdn_tty.h" #ifdef CONFIG_ISDN_AUDIO diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 990e6a7e667..7e5f30dbc0a 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -731,10 +731,10 @@ l1oip_socket_thread(void *data) while (!signal_pending(current)) { struct kvec iov = { .iov_base = recvbuf, - .iov_len = sizeof(recvbuf), + .iov_len = recvbuf_size, }; recvlen = kernel_recvmsg(socket, &msg, &iov, 1, - sizeof(recvbuf), 0); + recvbuf_size, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); } else { @@ -1480,7 +1480,7 @@ l1oip_init(void) return -ENOMEM; l1oip_cnt = 0; - while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) { + while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) { switch (type[l1oip_cnt] & 0xff) { case 1: pri = 0; diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index e2f45019ebf..3e1532a180f 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -17,6 +17,7 @@ #include <linux/mISDNif.h> #include <linux/kthread.h> +#include <linux/smp_lock.h> #include "core.h" static u_int *debug; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 9b60b6b684d..7c8e7122aaa 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -75,6 +75,7 @@ config LEDS_ALIX2 depends on LEDS_CLASS && X86 && EXPERIMENTAL help This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. + You have to set leds-alix2.force=1 for boards with Award BIOS. config LEDS_H1940 tristate "LED Support for iPAQ H1940 device" @@ -145,15 +146,16 @@ config LEDS_GPIO_OF of_platform devices. For instance, LEDs which are listed in a "dts" file. -config LEDS_LP5521 - tristate "LED Support for the LP5521 LEDs" +config LEDS_LP3944 + tristate "LED Support for N.S. 
LP3944 (Fun Light) I2C chip" depends on LEDS_CLASS && I2C help - If you say 'Y' here you get support for the National Semiconductor - LP5521 LED driver used in n8x0 boards. + This option enables support for LEDs connected to the National + Semiconductor LP3944 Lighting Management Unit (LMU) also known as + Fun Light Chip. - This driver can be built as a module by choosing 'M'. The module - will be called leds-lp5521. + To compile this driver as a module, choose M here: the + module will be called leds-lp3944. config LEDS_CLEVO_MAIL tristate "Mail LED on Clevo notebook" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 2d41c4dcf92..e8cdcf77a4c 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o +obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c index ddbd7730dfc..731d4eef342 100644 --- a/drivers/leds/leds-alix2.c +++ b/drivers/leds/leds-alix2.c @@ -14,7 +14,7 @@ static int force = 0; module_param(force, bool, 0444); -MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs"); +MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs"); struct alix_led { struct led_classdev cdev; @@ -155,6 +155,11 @@ static int __init alix_led_init(void) goto out; } + /* enable output on GPIO for LED 1,2,3 */ + outl(1 << 6, 0x6104); + outl(1 << 9, 0x6184); + outl(1 << 11, 0x6184); + pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); if (!IS_ERR(pdev)) { ret = platform_driver_probe(&alix_led_driver, alix_led_probe); diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 4149ecb3a9b..779d7f262c0 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c @@ -97,6 +97,10 @@ struct bd2802_led { enum led_ids led_id; enum led_colors color; enum led_bits state; + + /* General attributes of RGB LEDs */ + int wave_pattern; + int rgb_current; }; @@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id, bd2802_reset_cancel(led); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); - bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); + bd2802_write_byte(led->client, reg, led->rgb_current); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); @@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id, reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); - bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); + bd2802_write_byte(led->client, reg, led->rgb_current); reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); - bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF); + bd2802_write_byte(led->client, reg, led->wave_pattern); bd2802_enable(led, id); bd2802_update_state(led, id, color, BD2802_BLINK); @@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led) ret = device_create_file(&led->client->dev, bd2802_addr_attributes[i]); if (ret) { - dev_err(&led->client->dev, "failed to 
sysfs file %s\n", + dev_err(&led->client->dev, "failed: sysfs file %s\n", bd2802_addr_attributes[i]->attr.name); goto failed_remove_files; } @@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = { .store = bd2802_store_adv_conf, }; +#define BD2802_CONTROL_ATTR(attr_name, name_str) \ +static ssize_t bd2802_show_##attr_name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ + ssize_t ret; \ + down_read(&led->rwsem); \ + ret = sprintf(buf, "0x%02x\n", led->attr_name); \ + up_read(&led->rwsem); \ + return ret; \ +} \ +static ssize_t bd2802_store_##attr_name(struct device *dev, \ + struct device_attribute *attr, const char *buf, size_t count) \ +{ \ + struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\ + unsigned long val; \ + int ret; \ + if (!count) \ + return -EINVAL; \ + ret = strict_strtoul(buf, 16, &val); \ + if (ret) \ + return ret; \ + down_write(&led->rwsem); \ + led->attr_name = val; \ + up_write(&led->rwsem); \ + return count; \ +} \ +static struct device_attribute bd2802_##attr_name##_attr = { \ + .attr = { \ + .name = name_str, \ + .mode = 0644, \ + .owner = THIS_MODULE \ + }, \ + .show = bd2802_show_##attr_name, \ + .store = bd2802_store_##attr_name, \ +}; + +BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern"); +BD2802_CONTROL_ATTR(rgb_current, "rgb_current"); + +static struct device_attribute *bd2802_attributes[] = { + &bd2802_adv_conf_attr, + &bd2802_wave_pattern_attr, + &bd2802_rgb_current_attr, +}; + static void bd2802_led_work(struct work_struct *work) { struct bd2802_led *led = container_of(work, struct bd2802_led, work); @@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led1r.brightness = LED_OFF; led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness; led->cdev_led1r.blink_set = bd2802_set_led1r_blink; - led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); if (ret < 0) { @@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led1g.brightness = LED_OFF; led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness; led->cdev_led1g.blink_set = bd2802_set_led1g_blink; - led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); if (ret < 0) { @@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led1b.brightness = LED_OFF; led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness; led->cdev_led1b.blink_set = bd2802_set_led1b_blink; - led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); if (ret < 0) { @@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led2r.brightness = LED_OFF; led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness; led->cdev_led2r.blink_set = bd2802_set_led2r_blink; - led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); if (ret < 0) { @@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led) led->cdev_led2g.brightness = LED_OFF; led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness; led->cdev_led2g.blink_set = bd2802_set_led2g_blink; - led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME; ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); if (ret < 0) { @@ -640,7 +685,7 @@ static 
int __devinit bd2802_probe(struct i2c_client *client, { struct bd2802_led *led; struct bd2802_led_platform_data *pdata; - int ret; + int ret, i; led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL); if (!led) { @@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client, /* To save the power, reset BD2802 after detecting */ gpio_set_value(led->pdata->reset_gpio, 0); + /* Default attributes */ + led->wave_pattern = BD2802_PATTERN_HALF; + led->rgb_current = BD2802_CURRENT_032; + init_rwsem(&led->rwsem); - ret = device_create_file(&client->dev, &bd2802_adv_conf_attr); - if (ret) { - dev_err(&client->dev, "failed to create sysfs file %s\n", - bd2802_adv_conf_attr.attr.name); - goto failed_free; + for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) { + ret = device_create_file(&led->client->dev, + bd2802_attributes[i]); + if (ret) { + dev_err(&led->client->dev, "failed: sysfs file %s\n", + bd2802_attributes[i]->attr.name); + goto failed_unregister_dev_file; + } } ret = bd2802_register_led_classdev(led); @@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client, return 0; failed_unregister_dev_file: - device_remove_file(&client->dev, &bd2802_adv_conf_attr); + for (i--; i >= 0; i--) + device_remove_file(&led->client->dev, bd2802_attributes[i]); failed_free: i2c_set_clientdata(client, NULL); kfree(led); @@ -697,12 +750,14 @@ failed_free: static int __exit bd2802_remove(struct i2c_client *client) { struct bd2802_led *led = i2c_get_clientdata(client); + int i; - bd2802_unregister_led_classdev(led); gpio_set_value(led->pdata->reset_gpio, 0); + bd2802_unregister_led_classdev(led); if (led->adf_on) bd2802_disable_adv_conf(led); - device_remove_file(&client->dev, &bd2802_adv_conf_attr); + for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) + device_remove_file(&led->client->dev, bd2802_attributes[i]); i2c_set_clientdata(client, NULL); kfree(led); @@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client) struct bd2802_led *led = i2c_get_clientdata(client); if (!bd2802_is_all_off(led) || led->adf_on) { - gpio_set_value(led->pdata->reset_gpio, 1); - udelay(100); + bd2802_reset_cancel(led); bd2802_restore_state(led); } @@ -762,4 +816,4 @@ module_exit(bd2802_exit); MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); MODULE_DESCRIPTION("BD2802 LED driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c index ff0e8c3fbf9..5f1ce810815 100644 --- a/drivers/leds/leds-cobalt-raq.c +++ b/drivers/leds/leds-cobalt-raq.c @@ -1,7 +1,7 @@ /* * LEDs driver for the Cobalt Raq series. 
* - * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index d2109054de8..6b06638eb5b 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, struct gpio_led_data *led_dat, struct device *parent, int (*blink_set)(unsigned, unsigned long *, unsigned long *)) { - int ret; + int ret, state; /* skip leds that aren't available */ if (!gpio_is_valid(template->gpio)) { @@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template, led_dat->cdev.blink_set = gpio_blink_set; } led_dat->cdev.brightness_set = gpio_led_set; - led_dat->cdev.brightness = LED_OFF; + if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) + state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low; + else + state = (template->default_state == LEDS_GPIO_DEFSTATE_ON); + led_dat->cdev.brightness = state ? LED_FULL : LED_OFF; if (!template->retain_state_suspended) led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - ret = gpio_direction_output(led_dat->gpio, led_dat->active_low); + ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); if (ret < 0) goto err; @@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led) } #ifdef CONFIG_LEDS_GPIO_PLATFORM -static int gpio_led_probe(struct platform_device *pdev) +static int __devinit gpio_led_probe(struct platform_device *pdev) { struct gpio_led_platform_data *pdata = pdev->dev.platform_data; struct gpio_led_data *leds_data; @@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev, memset(&led, 0, sizeof(led)); for_each_child_of_node(np, child) { enum of_gpio_flags flags; + const char *state; led.gpio = of_get_gpio_flags(child, 0, &flags); led.active_low = flags & OF_GPIO_ACTIVE_LOW; led.name = of_get_property(child, "label", NULL) ? : child->name; led.default_trigger = of_get_property(child, "linux,default-trigger", NULL); + state = of_get_property(child, "default-state", NULL); + if (state) { + if (!strcmp(state, "keep")) + led.default_state = LEDS_GPIO_DEFSTATE_KEEP; + else if(!strcmp(state, "on")) + led.default_state = LEDS_GPIO_DEFSTATE_ON; + else + led.default_state = LEDS_GPIO_DEFSTATE_OFF; + } ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++], &ofdev->dev, NULL); diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c new file mode 100644 index 00000000000..5946208ba26 --- /dev/null +++ b/drivers/leds/leds-lp3944.c @@ -0,0 +1,466 @@ +/* + * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip + * + * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/* + * I2C driver for National Semiconductor LP3944 Funlight Chip + * http://www.national.com/pf/LP/LP3944.html + * + * This helper chip can drive up to 8 leds, with two programmable DIM modes; + * it could even be used as a gpio expander but this driver assumes it is used + * as a led controller. 
+ * + * The DIM modes are used to set _blink_ patterns for leds, the pattern is + * specified supplying two parameters: + * - period: from 0s to 1.6s + * - duty cycle: percentage of the period the led is on, from 0 to 100 + * + * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb + * leds, the camera flash light and the displays backlights. + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/leds.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/leds-lp3944.h> + +/* Read Only Registers */ +#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ +#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ + +#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ +#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ +#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ +#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ +#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ +#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ + +/* These registers are not used to control leds in LP3944, they can store + * arbitrary values which the chip will ignore. + */ +#define LP3944_REG_REGISTER8 0x08 +#define LP3944_REG_REGISTER9 0x09 + +#define LP3944_DIM0 0 +#define LP3944_DIM1 1 + +/* period in ms */ +#define LP3944_PERIOD_MIN 0 +#define LP3944_PERIOD_MAX 1600 + +/* duty cycle is a percentage */ +#define LP3944_DUTY_CYCLE_MIN 0 +#define LP3944_DUTY_CYCLE_MAX 100 + +#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) + +/* Saved data */ +struct lp3944_led_data { + u8 id; + enum lp3944_type type; + enum lp3944_status status; + struct led_classdev ldev; + struct i2c_client *client; + struct work_struct work; +}; + +struct lp3944_data { + struct mutex lock; + struct i2c_client *client; + struct lp3944_led_data leds[LP3944_LEDS_MAX]; +}; + +static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value) +{ + int tmp; + + tmp = i2c_smbus_read_byte_data(client, reg); + if (tmp < 0) + return -EINVAL; + + *value = tmp; + + return 0; +} + +static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +/** + * Set the period for DIM status + * + * @client: the i2c client + * @dim: either LP3944_DIM0 or LP3944_DIM1 + * @period: period of a blink, that is a on/off cycle, expressed in ms. 
+ */ +static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) +{ + u8 psc_reg; + u8 psc_value; + int err; + + if (dim == LP3944_DIM0) + psc_reg = LP3944_REG_PSC0; + else if (dim == LP3944_DIM1) + psc_reg = LP3944_REG_PSC1; + else + return -EINVAL; + + /* Convert period to Prescaler value */ + if (period > LP3944_PERIOD_MAX) + return -EINVAL; + + psc_value = (period * 255) / LP3944_PERIOD_MAX; + + err = lp3944_reg_write(client, psc_reg, psc_value); + + return err; +} + +/** + * Set the duty cycle for DIM status + * + * @client: the i2c client + * @dim: either LP3944_DIM0 or LP3944_DIM1 + * @duty_cycle: percentage of a period during which a led is ON + */ +static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, + u8 duty_cycle) +{ + u8 pwm_reg; + u8 pwm_value; + int err; + + if (dim == LP3944_DIM0) + pwm_reg = LP3944_REG_PWM0; + else if (dim == LP3944_DIM1) + pwm_reg = LP3944_REG_PWM1; + else + return -EINVAL; + + /* Convert duty cycle to PWM value */ + if (duty_cycle > LP3944_DUTY_CYCLE_MAX) + return -EINVAL; + + pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; + + err = lp3944_reg_write(client, pwm_reg, pwm_value); + + return err; +} + +/** + * Set the led status + * + * @led: a lp3944_led_data structure + * @status: one of LP3944_LED_STATUS_OFF + * LP3944_LED_STATUS_ON + * LP3944_LED_STATUS_DIM0 + * LP3944_LED_STATUS_DIM1 + */ +static int lp3944_led_set(struct lp3944_led_data *led, u8 status) +{ + struct lp3944_data *data = i2c_get_clientdata(led->client); + u8 id = led->id; + u8 reg; + u8 val = 0; + int err; + + dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", + __func__, led->ldev.name, status); + + switch (id) { + case LP3944_LED0: + case LP3944_LED1: + case LP3944_LED2: + case LP3944_LED3: + reg = LP3944_REG_LS0; + break; + case LP3944_LED4: + case LP3944_LED5: + case LP3944_LED6: + case LP3944_LED7: + id -= LP3944_LED4; + reg = LP3944_REG_LS1; + break; + default: + return -EINVAL; + } + + if (status > LP3944_LED_STATUS_DIM1) + return -EINVAL; + + /* invert only 0 and 1, leave unchanged the other values, + * remember we are abusing status to set blink patterns + */ + if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2) + status = 1 - status; + + mutex_lock(&data->lock); + lp3944_reg_read(led->client, reg, &val); + + val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); + val |= (status << (id << 1)); + + dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", + __func__, led->ldev.name, reg, id, status, val); + + /* set led status */ + err = lp3944_reg_write(led->client, reg, val); + mutex_unlock(&data->lock); + + return err; +} + +static int lp3944_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct lp3944_led_data *led = ldev_to_led(led_cdev); + u16 period; + u8 duty_cycle; + int err; + + /* units are in ms */ + if (*delay_on + *delay_off > LP3944_PERIOD_MAX) + return -EINVAL; + + if (*delay_on == 0 && *delay_off == 0) { + /* Special case: the leds subsystem requires a default user + * friendly blink pattern for the LED. Let's blink the led + * slowly (1Hz). 
+ */ + *delay_on = 500; + *delay_off = 500; + } + + period = (*delay_on) + (*delay_off); + + /* duty_cycle is the percentage of period during which the led is ON */ + duty_cycle = 100 * (*delay_on) / period; + + /* invert duty cycle for inverted leds, this has the same effect of + * swapping delay_on and delay_off + */ + if (led->type == LP3944_LED_TYPE_LED_INVERTED) + duty_cycle = 100 - duty_cycle; + + /* NOTE: using always the first DIM mode, this means that all leds + * will have the same blinking pattern. + * + * We could find a way later to have two leds blinking in hardware + * with different patterns at the same time, falling back to software + * control for the other ones. + */ + err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); + if (err) + return err; + + err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); + if (err) + return err; + + dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", + __func__); + + led->status = LP3944_LED_STATUS_DIM0; + schedule_work(&led->work); + + return 0; +} + +static void lp3944_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct lp3944_led_data *led = ldev_to_led(led_cdev); + + dev_dbg(&led->client->dev, "%s: %s, %d\n", + __func__, led_cdev->name, brightness); + + led->status = brightness; + schedule_work(&led->work); +} + +static void lp3944_led_work(struct work_struct *work) +{ + struct lp3944_led_data *led; + + led = container_of(work, struct lp3944_led_data, work); + lp3944_led_set(led, led->status); +} + +static int lp3944_configure(struct i2c_client *client, + struct lp3944_data *data, + struct lp3944_platform_data *pdata) +{ + int i, err = 0; + + for (i = 0; i < pdata->leds_size; i++) { + struct lp3944_led *pled = &pdata->leds[i]; + struct lp3944_led_data *led = &data->leds[i]; + led->client = client; + led->id = i; + + switch (pled->type) { + + case LP3944_LED_TYPE_LED: + case LP3944_LED_TYPE_LED_INVERTED: + led->type = pled->type; + led->status = pled->status; + led->ldev.name = pled->name; + led->ldev.max_brightness = 1; + led->ldev.brightness_set = lp3944_led_set_brightness; + led->ldev.blink_set = lp3944_led_set_blink; + led->ldev.flags = LED_CORE_SUSPENDRESUME; + + INIT_WORK(&led->work, lp3944_led_work); + err = led_classdev_register(&client->dev, &led->ldev); + if (err < 0) { + dev_err(&client->dev, + "couldn't register LED %s\n", + led->ldev.name); + goto exit; + } + + /* to expose the default value to userspace */ + led->ldev.brightness = led->status; + + /* Set the default led status */ + err = lp3944_led_set(led, led->status); + if (err < 0) { + dev_err(&client->dev, + "%s couldn't set STATUS %d\n", + led->ldev.name, led->status); + goto exit; + } + break; + + case LP3944_LED_TYPE_NONE: + default: + break; + + } + } + return 0; + +exit: + if (i > 0) + for (i = i - 1; i >= 0; i--) + switch (pdata->leds[i].type) { + + case LP3944_LED_TYPE_LED: + case LP3944_LED_TYPE_LED_INVERTED: + led_classdev_unregister(&data->leds[i].ldev); + cancel_work_sync(&data->leds[i].work); + break; + + case LP3944_LED_TYPE_NONE: + default: + break; + } + + return err; +} + +static int __devinit lp3944_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; + struct lp3944_data *data; + + if (lp3944_pdata == NULL) { + dev_err(&client->dev, "no platform data\n"); + return -EINVAL; + } + + /* Let's see whether this adapter can support what we need. 
*/ + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_err(&client->dev, "insufficient functionality!\n"); + return -ENODEV; + } + + data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + i2c_set_clientdata(client, data); + + mutex_init(&data->lock); + + dev_info(&client->dev, "lp3944 enabled\n"); + + lp3944_configure(client, data, lp3944_pdata); + return 0; +} + +static int __devexit lp3944_remove(struct i2c_client *client) +{ + struct lp3944_platform_data *pdata = client->dev.platform_data; + struct lp3944_data *data = i2c_get_clientdata(client); + int i; + + for (i = 0; i < pdata->leds_size; i++) + switch (data->leds[i].type) { + case LP3944_LED_TYPE_LED: + case LP3944_LED_TYPE_LED_INVERTED: + led_classdev_unregister(&data->leds[i].ldev); + cancel_work_sync(&data->leds[i].work); + break; + + case LP3944_LED_TYPE_NONE: + default: + break; + } + + kfree(data); + i2c_set_clientdata(client, NULL); + + return 0; +} + +/* lp3944 i2c driver struct */ +static const struct i2c_device_id lp3944_id[] = { + {"lp3944", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, lp3944_id); + +static struct i2c_driver lp3944_driver = { + .driver = { + .name = "lp3944", + }, + .probe = lp3944_probe, + .remove = __devexit_p(lp3944_remove), + .id_table = lp3944_id, +}; + +static int __init lp3944_module_init(void) +{ + return i2c_add_driver(&lp3944_driver); +} + +static void __exit lp3944_module_exit(void) +{ + i2c_del_driver(&lp3944_driver); +} + +module_init(lp3944_module_init); +module_exit(lp3944_module_exit); + +MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); +MODULE_DESCRIPTION("LP3944 Fun Light Chip"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 3937244fdca..dba8921240f 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -35,7 +35,7 @@ struct pca9532_data { struct pca9532_led leds[16]; struct mutex update_lock; struct input_dev *idev; - struct work_struct work; + struct work_struct work; u8 pwm[2]; u8 psc[2]; }; @@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink, if (b > 0xFF) return -EINVAL; data->pwm[pwm] = b; - data->psc[pwm] = blink; - return 0; + data->psc[pwm] = blink; + return 0; } static int pca9532_setpwm(struct i2c_client *client, int pwm) { - struct pca9532_data *data = i2c_get_clientdata(client); - mutex_lock(&data->update_lock); + struct pca9532_data *data = i2c_get_clientdata(client); + mutex_lock(&data->update_lock); i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), data->pwm[pwm]); i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), @@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev, led->state = PCA9532_ON; else { led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ - err = pca9532_calcpwm(led->client, 0, 0, value); + err = pca9532_calcpwm(led->client, 0, 0, value); if (err) return; /* XXX: led api doesn't allow error code? 
*/ } - schedule_work(&led->work); + schedule_work(&led->work); } static int pca9532_set_blink(struct led_classdev *led_cdev, @@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, struct pca9532_led *led = ldev_to_led(led_cdev); struct i2c_client *client = led->client; int psc; - int err = 0; + int err = 0; if (*delay_on == 0 && *delay_off == 0) { /* led subsystem ask us for a blink rate */ @@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, /* Thecus specific: only use PSC/PWM 0 */ psc = (*delay_on * 152-1)/1000; - err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); - if (err) - return err; - schedule_work(&led->work); - return 0; + err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); + if (err) + return err; + schedule_work(&led->work); + return 0; } static int pca9532_event(struct input_dev *dev, unsigned int type, @@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type, else data->pwm[1] = 0; - schedule_work(&data->work); + schedule_work(&data->work); - return 0; + return 0; } static void pca9532_input_work(struct work_struct *work) { - struct pca9532_data *data; - data = container_of(work, struct pca9532_data, work); + struct pca9532_data *data; + data = container_of(work, struct pca9532_data, work); mutex_lock(&data->update_lock); i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), data->pwm[1]); @@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work) static void pca9532_led_work(struct work_struct *work) { - struct pca9532_led *led; - led = container_of(work, struct pca9532_led, work); - if (led->state == PCA9532_PWM0) - pca9532_setpwm(led->client, 0); - pca9532_setled(led); + struct pca9532_led *led; + led = container_of(work, struct pca9532_led, work); + if (led->state == PCA9532_PWM0) + pca9532_setpwm(led->client, 0); + pca9532_setled(led); } static int pca9532_configure(struct i2c_client *client, @@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client, led->ldev.brightness = LED_OFF; led->ldev.brightness_set = pca9532_set_brightness; led->ldev.blink_set = pca9532_set_blink; - INIT_WORK(&led->work, pca9532_led_work); + INIT_WORK(&led->work, pca9532_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, @@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client, BIT_MASK(SND_TONE); data->idev->event = pca9532_event; input_set_drvdata(data->idev, data); - INIT_WORK(&data->work, pca9532_input_work); + INIT_WORK(&data->work, pca9532_input_work); err = input_register_device(data->idev); if (err) { input_free_device(data->idev); - cancel_work_sync(&data->work); + cancel_work_sync(&data->work); data->idev = NULL; goto exit; } @@ -283,13 +283,13 @@ exit: break; case PCA9532_TYPE_LED: led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); + cancel_work_sync(&data->leds[i].work); break; case PCA9532_TYPE_N2100_BEEP: if (data->idev != NULL) { input_unregister_device(data->idev); input_free_device(data->idev); - cancel_work_sync(&data->work); + cancel_work_sync(&data->work); data->idev = NULL; } break; @@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client) break; case PCA9532_TYPE_LED: led_classdev_unregister(&data->leds[i].ldev); - cancel_work_sync(&data->leds[i].work); + cancel_work_sync(&data->leds[i].work); break; case PCA9532_TYPE_N2100_BEEP: if (data->idev != NULL) { input_unregister_device(data->idev); 
input_free_device(data->idev); - cancel_work_sync(&data->work); + cancel_work_sync(&data->work); data->idev = NULL; } break; diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c index a247ae63374..1bc5db4ece0 100644 --- a/drivers/leds/ledtrig-gpio.c +++ b/drivers/leds/ledtrig-gpio.c @@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev, gpio_data->inverted = !!inverted; + /* After inverting, we need to update the LED. */ + schedule_work(&gpio_data->work); + return n; } static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show, @@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev, return -EINVAL; } + if (gpio_data->gpio == gpio) + return n; + if (!gpio) { - free_irq(gpio_to_irq(gpio_data->gpio), led); + if (gpio_data->gpio != 0) + free_irq(gpio_to_irq(gpio_data->gpio), led); + gpio_data->gpio = 0; return n; } - if (gpio_data->gpio > 0 && gpio_data->gpio != gpio) - free_irq(gpio_to_irq(gpio_data->gpio), led); - - gpio_data->gpio = gpio; ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq, IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led); - if (ret) + if (ret) { dev_err(dev, "request_irq failed with error %d\n", ret); + } else { + if (gpio_data->gpio != 0) + free_irq(gpio_to_irq(gpio_data->gpio), led); + gpio_data->gpio = gpio; + } return ret ? ret : n; } @@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led) device_remove_file(led->dev, &dev_attr_inverted); device_remove_file(led->dev, &dev_attr_desired_brightness); flush_work(&gpio_data->work); - free_irq(gpio_to_irq(gpio_data->gpio),led); + if (gpio_data->gpio != 0) + free_irq(gpio_to_irq(gpio_data->gpio), led); kfree(gpio_data); } } diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index a6974e9b8eb..1e2cb846b3c 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -1,6 +1,8 @@ -/*P:400 This contains run_guest() which actually calls into the Host<->Guest +/*P:400 + * This contains run_guest() which actually calls into the Host<->Guest * Switcher and analyzes the return, such as determining if the Guest wants the - * Host to do something. This file also contains useful helper routines. :*/ + * Host to do something. This file also contains useful helper routines. +:*/ #include <linux/module.h> #include <linux/stringify.h> #include <linux/stddef.h> @@ -24,7 +26,8 @@ static struct page **switcher_page; /* This One Big lock protects all inter-guest data structures. */ DEFINE_MUTEX(lguest_lock); -/*H:010 We need to set up the Switcher at a high virtual address. Remember the +/*H:010 + * We need to set up the Switcher at a high virtual address. Remember the * Switcher is a few hundred bytes of assembler code which actually changes the * CPU to run the Guest, and then changes back to the Host when a trap or * interrupt happens. @@ -33,7 +36,8 @@ DEFINE_MUTEX(lguest_lock); * Host since it will be running as the switchover occurs. * * Trying to map memory at a particular address is an unusual thing to do, so - * it's not a simple one-liner. */ + * it's not a simple one-liner. + */ static __init int map_switcher(void) { int i, err; @@ -47,8 +51,10 @@ static __init int map_switcher(void) * easy. */ - /* We allocate an array of struct page pointers. map_vm_area() wants - * this, rather than just an array of pages. */ + /* + * We allocate an array of struct page pointers. map_vm_area() wants + * this, rather than just an array of pages. 
+ */ switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, GFP_KERNEL); if (!switcher_page) { @@ -56,8 +62,10 @@ static __init int map_switcher(void) goto out; } - /* Now we actually allocate the pages. The Guest will see these pages, - * so we make sure they're zeroed. */ + /* + * Now we actually allocate the pages. The Guest will see these pages, + * so we make sure they're zeroed. + */ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { unsigned long addr = get_zeroed_page(GFP_KERNEL); if (!addr) { @@ -67,19 +75,23 @@ static __init int map_switcher(void) switcher_page[i] = virt_to_page(addr); } - /* First we check that the Switcher won't overlap the fixmap area at + /* + * First we check that the Switcher won't overlap the fixmap area at * the top of memory. It's currently nowhere near, but it could have - * very strange effects if it ever happened. */ + * very strange effects if it ever happened. + */ if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ err = -ENOMEM; printk("lguest: mapping switcher would thwack fixmap\n"); goto free_pages; } - /* Now we reserve the "virtual memory area" we want: 0xFFC00000 + /* + * Now we reserve the "virtual memory area" we want: 0xFFC00000 * (SWITCHER_ADDR). We might not get it in theory, but in practice * it's worked so far. The end address needs +1 because __get_vm_area - * allocates an extra guard page, so we need space for that. */ + * allocates an extra guard page, so we need space for that. + */ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); @@ -89,11 +101,13 @@ static __init int map_switcher(void) goto free_pages; } - /* This code actually sets up the pages we've allocated to appear at + /* + * This code actually sets up the pages we've allocated to appear at * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the * kind of pages we're mapping (kernel pages), and a pointer to our * array of struct pages. It increments that pointer, but we don't - * care. */ + * care. + */ pagep = switcher_page; err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); if (err) { @@ -101,8 +115,10 @@ static __init int map_switcher(void) goto free_vma; } - /* Now the Switcher is mapped at the right address, we can't fail! - * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */ + /* + * Now the Switcher is mapped at the right address, we can't fail! + * Copy in the compiled-in Switcher code (from <arch>_switcher.S). + */ memcpy(switcher_vma->addr, start_switcher_text, end_switcher_text - start_switcher_text); @@ -124,8 +140,7 @@ out: } /*:*/ -/* Cleaning up the mapping when the module is unloaded is almost... - * too easy. */ +/* Cleaning up the mapping when the module is unloaded is almost... too easy. */ static void unmap_switcher(void) { unsigned int i; @@ -151,16 +166,19 @@ static void unmap_switcher(void) * But we can't trust the Guest: it might be trying to access the Launcher * code. We have to check that the range is below the pfn_limit the Launcher * gave us. We have to make sure that addr + len doesn't give us a false - * positive by overflowing, too. */ + * positive by overflowing, too. + */ bool lguest_address_ok(const struct lguest *lg, unsigned long addr, unsigned long len) { return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); } -/* This routine copies memory from the Guest. Here we can see how useful the +/* + * This routine copies memory from the Guest. 
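The overflow guard in lguest_address_ok() above is easy to miss: without the addr + len >= addr test, a huge len could wrap around and look like a small, valid range. A stand-alone sketch of the same check, with a made-up page size and pfn limit:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE  4096UL
#define DEMO_PFN_LIMIT  1024UL  /* pretend the Guest owns 4 MB */

/* Same shape as lguest_address_ok(): range below pfn_limit, no wraparound. */
static bool address_ok(unsigned long addr, unsigned long len)
{
        return (addr + len) / DEMO_PAGE_SIZE < DEMO_PFN_LIMIT &&
               (addr + len >= addr);
}

int main(void)
{
        printf("%d\n", address_ok(0x1000, 0x100));      /* 1: fits          */
        printf("%d\n", address_ok(0x400000, 0x10));     /* 0: past limit    */
        printf("%d\n", address_ok(~0UL - 0xfff, 0x2000)); /* 0: addr+len wraps */
        return 0;
}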
Here we can see how useful the * kill_lguest() routine we met in the Launcher can be: we return a random - * value (all zeroes) instead of needing to return an error. */ + * value (all zeroes) instead of needing to return an error. + */ void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) { if (!lguest_address_ok(cpu->lg, addr, bytes) @@ -181,9 +199,11 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, } /*:*/ -/*H:030 Let's jump straight to the the main loop which runs the Guest. +/*H:030 + * Let's jump straight to the main loop which runs the Guest. * Remember, this is called by the Launcher reading /dev/lguest, and we keep - * going around and around until something interesting happens. */ + * going around and around until something interesting happens. + */ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) { /* We stop running once the Guest is dead. */ @@ -195,10 +215,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) if (cpu->hcall) do_hypercalls(cpu); - /* It's possible the Guest did a NOTIFY hypercall to the - * Launcher, in which case we return from the read() now. */ + /* + * It's possible the Guest did a NOTIFY hypercall to the + * Launcher. + */ if (cpu->pending_notify) { + /* + * Does it just need to write to a registered + * eventfd (ie. the appropriate virtqueue thread)? + */ if (!send_notify_to_eventfd(cpu)) { + /* OK, we tell the main Launcher. */ if (put_user(cpu->pending_notify, user)) return -EFAULT; return sizeof(cpu->pending_notify); @@ -209,29 +236,39 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) if (signal_pending(current)) return -ERESTARTSYS; - /* Check if there are any interrupts which can be delivered now: + /* + * Check if there are any interrupts which can be delivered now: * if so, this sets up the handler to be executed when we next - * run the Guest. */ + * run the Guest. + */ irq = interrupt_pending(cpu, &more); if (irq < LGUEST_IRQS) try_deliver_interrupt(cpu, irq, more); - /* All long-lived kernel loops need to check with this horrible + /* + * All long-lived kernel loops need to check with this horrible * thing called the freezer. If the Host is trying to suspend, - * it stops us. */ + * it stops us. + */ try_to_freeze(); - /* Just make absolutely sure the Guest is still alive. One of - * those hypercalls could have been fatal, for example. */ + /* + * Just make absolutely sure the Guest is still alive. One of + * those hypercalls could have been fatal, for example. + */ if (cpu->lg->dead) break; - /* If the Guest asked to be stopped, we sleep. The Guest's - * clock timer will wake us. */ + /* + * If the Guest asked to be stopped, we sleep. The Guest's + * clock timer will wake us. + */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); - /* Just before we sleep, make sure no interrupt snuck in - * which we should be doing. */ + /* + * Just before we sleep, make sure no interrupt snuck in + * which we should be doing. + */ if (interrupt_pending(cpu, &more) < LGUEST_IRQS) set_current_state(TASK_RUNNING); else @@ -239,8 +276,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) continue; } - /* OK, now we're ready to jump into the Guest. First we put up - * the "Do Not Disturb" sign: */ + /* + * OK, now we're ready to jump into the Guest. First we put up + * the "Do Not Disturb" sign: + */ local_irq_disable(); /* Actually run the Guest until something happens. 
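Seen from the other side of /dev/lguest, run_guest() is what a Launcher's read() blocks in; the read returns an unsigned long when a NOTIFY has no matching eventfd. The loop below is a hypothetical user-space sketch of how a Launcher might consume that; the fd setup, handle_notify() and the signal handling are assumptions, not code from this patch.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: react to a NOTIFY address the Guest sent us. */
static void handle_notify(unsigned long addr)
{
        printf("Guest notified on address %#lx\n", addr);
}

/* lg_fd is an already-set-up /dev/lguest file descriptor (setup not shown). */
static int launcher_loop(int lg_fd)
{
        unsigned long notify;
        ssize_t r;

        for (;;) {
                r = read(lg_fd, &notify, sizeof(notify));
                if (r == sizeof(notify)) {
                        /* A NOTIFY that no eventfd claimed: handle it here. */
                        handle_notify(notify);
                        continue;
                }
                if (r < 0 && errno == EINTR)
                        continue;       /* a signal stopped the Guest; resume */
                /* Anything else: the Guest stopped or died; give up. */
                return r < 0 ? -1 : 0;
        }
}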
*/ @@ -327,8 +366,10 @@ static void __exit fini(void) } /*:*/ -/* The Host side of lguest can be a module. This is a nice way for people to - * play with it. */ +/* + * The Host side of lguest can be a module. This is a nice way for people to + * play with it. + */ module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index c29ffa19cb7..83511eb0923 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -1,8 +1,10 @@ -/*P:500 Just as userspace programs request kernel operations through a system +/*P:500 + * Just as userspace programs request kernel operations through a system * call, the Guest requests Host operations through a "hypercall". You might * notice this nomenclature doesn't really follow any logic, but the name has * been around for long enough that we're stuck with it. As you'd expect, this - * code is basically a one big switch statement. :*/ + * code is basically a one big switch statement. +:*/ /* Copyright (C) 2006 Rusty Russell IBM Corporation @@ -28,30 +30,41 @@ #include <asm/pgtable.h> #include "lg.h" -/*H:120 This is the core hypercall routine: where the Guest gets what it wants. - * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. */ +/*H:120 + * This is the core hypercall routine: where the Guest gets what it wants. + * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. + */ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) { switch (args->arg0) { case LHCALL_FLUSH_ASYNC: - /* This call does nothing, except by breaking out of the Guest - * it makes us process all the asynchronous hypercalls. */ + /* + * This call does nothing, except by breaking out of the Guest + * it makes us process all the asynchronous hypercalls. + */ break; case LHCALL_SEND_INTERRUPTS: - /* This call does nothing too, but by breaking out of the Guest - * it makes us process any pending interrupts. */ + /* + * This call does nothing too, but by breaking out of the Guest + * it makes us process any pending interrupts. + */ break; case LHCALL_LGUEST_INIT: - /* You can't get here unless you're already initialized. Don't - * do that. */ + /* + * You can't get here unless you're already initialized. Don't + * do that. + */ kill_guest(cpu, "already have lguest_data"); break; case LHCALL_SHUTDOWN: { - /* Shutdown is such a trivial hypercall that we do it in four - * lines right here. */ char msg[128]; - /* If the lgread fails, it will call kill_guest() itself; the - * kill_guest() with the message will be ignored. */ + /* + * Shutdown is such a trivial hypercall that we do it in five + * lines right here. + * + * If the lgread fails, it will call kill_guest() itself; the + * kill_guest() with the message will be ignored. + */ __lgread(cpu, msg, args->arg1, sizeof(msg)); msg[sizeof(msg)-1] = '\0'; kill_guest(cpu, "CRASH: %s", msg); @@ -60,16 +73,17 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) break; } case LHCALL_FLUSH_TLB: - /* FLUSH_TLB comes in two flavors, depending on the - * argument: */ + /* FLUSH_TLB comes in two flavors, depending on the argument: */ if (args->arg1) guest_pagetable_clear_all(cpu); else guest_pagetable_flush_user(cpu); break; - /* All these calls simply pass the arguments through to the right - * routines. */ + /* + * All these calls simply pass the arguments through to the right + * routines. 
+ */ case LHCALL_NEW_PGTABLE: guest_new_pagetable(cpu, args->arg1); break; @@ -112,15 +126,16 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) kill_guest(cpu, "Bad hypercall %li\n", args->arg0); } } -/*:*/ -/*H:124 Asynchronous hypercalls are easy: we just look in the array in the +/*H:124 + * Asynchronous hypercalls are easy: we just look in the array in the * Guest's "struct lguest_data" to see if any new ones are marked "ready". * * We are careful to do these in order: obviously we respect the order the * Guest put them in the ring, but we also promise the Guest that they will * happen before any normal hypercall (which is why we check this before - * checking for a normal hcall). */ + * checking for a normal hcall). + */ static void do_async_hcalls(struct lg_cpu *cpu) { unsigned int i; @@ -133,22 +148,28 @@ static void do_async_hcalls(struct lg_cpu *cpu) /* We process "struct lguest_data"s hcalls[] ring once. */ for (i = 0; i < ARRAY_SIZE(st); i++) { struct hcall_args args; - /* We remember where we were up to from last time. This makes + /* + * We remember where we were up to from last time. This makes * sure that the hypercalls are done in the order the Guest - * places them in the ring. */ + * places them in the ring. + */ unsigned int n = cpu->next_hcall; /* 0xFF means there's no call here (yet). */ if (st[n] == 0xFF) break; - /* OK, we have hypercall. Increment the "next_hcall" cursor, - * and wrap back to 0 if we reach the end. */ + /* + * OK, we have hypercall. Increment the "next_hcall" cursor, + * and wrap back to 0 if we reach the end. + */ if (++cpu->next_hcall == LHCALL_RING_SIZE) cpu->next_hcall = 0; - /* Copy the hypercall arguments into a local copy of - * the hcall_args struct. */ + /* + * Copy the hypercall arguments into a local copy of the + * hcall_args struct. + */ if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], sizeof(struct hcall_args))) { kill_guest(cpu, "Fetching async hypercalls"); @@ -164,19 +185,25 @@ static void do_async_hcalls(struct lg_cpu *cpu) break; } - /* Stop doing hypercalls if they want to notify the Launcher: - * it needs to service this first. */ + /* + * Stop doing hypercalls if they want to notify the Launcher: + * it needs to service this first. + */ if (cpu->pending_notify) break; } } -/* Last of all, we look at what happens first of all. The very first time the - * Guest makes a hypercall, we end up here to set things up: */ +/* + * Last of all, we look at what happens first of all. The very first time the + * Guest makes a hypercall, we end up here to set things up: + */ static void initialize(struct lg_cpu *cpu) { - /* You can't do anything until you're initialized. The Guest knows the - * rules, so we're unforgiving here. */ + /* + * You can't do anything until you're initialized. The Guest knows the + * rules, so we're unforgiving here. + */ if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); return; @@ -185,32 +212,44 @@ static void initialize(struct lg_cpu *cpu) if (lguest_arch_init_hypercalls(cpu)) kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); - /* The Guest tells us where we're not to deliver interrupts by putting - * the range of addresses into "struct lguest_data". */ + /* + * The Guest tells us where we're not to deliver interrupts by putting + * the range of addresses into "struct lguest_data". 
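The ring walk in do_async_hcalls() is a cursor that services calls in order, stops at the 0xFF "empty" sentinel and wraps at the end of the ring. The same pattern reduced to plain C; the ring size and payload values are invented for the demo.

#include <stdio.h>

#define RING_SIZE       4
#define SLOT_EMPTY      0xFF

/* Toy ring: two calls queued, two empty slots. */
static unsigned char st[RING_SIZE] = { 7, 3, SLOT_EMPTY, SLOT_EMPTY };
static unsigned int next_call;  /* persists across calls, like cpu->next_hcall */

static void consume_ring(void)
{
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++) {
                unsigned int n = next_call;

                if (st[n] == SLOT_EMPTY)
                        break;                  /* nothing (yet) in this slot */

                if (++next_call == RING_SIZE)   /* advance cursor, wrap at end */
                        next_call = 0;

                printf("servicing call %u from slot %u\n", st[n], n);
                st[n] = SLOT_EMPTY;             /* mark the slot done */
        }
}

int main(void)
{
        consume_ring();         /* services 7 then 3, stops at the empty slot */
        return 0;
}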
+ */ if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); - /* We write the current time into the Guest's data page once so it can - * set its clock. */ + /* + * We write the current time into the Guest's data page once so it can + * set its clock. + */ write_timestamp(cpu); /* page_tables.c will also do some setup. */ page_table_guest_data_init(cpu); - /* This is the one case where the above accesses might have been the + /* + * This is the one case where the above accesses might have been the * first write to a Guest page. This may have caused a copy-on-write * fault, but the old page might be (read-only) in the Guest - * pagetable. */ + * pagetable. + */ guest_pagetable_clear_all(cpu); } /*:*/ -/*M:013 If a Guest reads from a page (so creates a mapping) that it has never +/*M:013 + * If a Guest reads from a page (so creates a mapping) that it has never * written to, and then the Launcher writes to it (ie. the output of a virtual * device), the Guest will still see the old page. In practice, this never * happens: why would the Guest read a page which it has never written to? But - * a similar scenario might one day bite us, so it's worth mentioning. :*/ + * a similar scenario might one day bite us, so it's worth mentioning. + * + * Note that if we used a shared anonymous mapping in the Launcher instead of + * mapping /dev/zero private, we wouldn't worry about cop-on-write. And we + * need that to switch the Launcher to processes (away from threads) anyway. +:*/ /*H:100 * Hypercalls @@ -229,17 +268,22 @@ void do_hypercalls(struct lg_cpu *cpu) return; } - /* The Guest has initialized. + /* + * The Guest has initialized. * - * Look in the hypercall ring for the async hypercalls: */ + * Look in the hypercall ring for the async hypercalls: + */ do_async_hcalls(cpu); - /* If we stopped reading the hypercall ring because the Guest did a + /* + * If we stopped reading the hypercall ring because the Guest did a * NOTIFY to the Launcher, we want to return now. Otherwise we do - * the hypercall. */ + * the hypercall. + */ if (!cpu->pending_notify) { do_hcall(cpu, cpu->hcall); - /* Tricky point: we reset the hcall pointer to mark the + /* + * Tricky point: we reset the hcall pointer to mark the * hypercall as "done". We use the hcall pointer rather than * the trap number to indicate a hypercall is pending. * Normally it doesn't matter: the Guest will run again and @@ -248,13 +292,16 @@ void do_hypercalls(struct lg_cpu *cpu) * However, if we are signalled or the Guest sends I/O to the * Launcher, the run_guest() loop will exit without running the * Guest. When it comes back it would try to re-run the - * hypercall. Finding that bug sucked. */ + * hypercall. Finding that bug sucked. + */ cpu->hcall = NULL; } } -/* This routine supplies the Guest with time: it's used for wallclock time at - * initial boot and as a rough time source if the TSC isn't available. */ +/* + * This routine supplies the Guest with time: it's used for wallclock time at + * initial boot and as a rough time source if the TSC isn't available. 
+ */ void write_timestamp(struct lg_cpu *cpu) { struct timespec now; diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 0e9067b0d50..18648180db0 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -1,4 +1,5 @@ -/*P:800 Interrupts (traps) are complicated enough to earn their own file. +/*P:800 + * Interrupts (traps) are complicated enough to earn their own file. * There are three classes of interrupts: * * 1) Real hardware interrupts which occur while we're running the Guest, @@ -10,7 +11,8 @@ * just like real hardware would deliver them. Traps from the Guest can be set * up to go directly back into the Guest, but sometimes the Host wants to see * them first, so we also have a way of "reflecting" them into the Guest as if - * they had been delivered to it directly. :*/ + * they had been delivered to it directly. +:*/ #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -26,8 +28,10 @@ static unsigned long idt_address(u32 lo, u32 hi) return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); } -/* The "type" of the interrupt handler is a 4 bit field: we only support a - * couple of types. */ +/* + * The "type" of the interrupt handler is a 4 bit field: we only support a + * couple of types. + */ static int idt_type(u32 lo, u32 hi) { return (hi >> 8) & 0xF; @@ -39,8 +43,10 @@ static bool idt_present(u32 lo, u32 hi) return (hi & 0x8000); } -/* We need a helper to "push" a value onto the Guest's stack, since that's a - * big part of what delivering an interrupt does. */ +/* + * We need a helper to "push" a value onto the Guest's stack, since that's a + * big part of what delivering an interrupt does. + */ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) { /* Stack grows upwards: move stack then write value. */ @@ -48,7 +54,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) lgwrite(cpu, *gstack, u32, val); } -/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or +/*H:210 + * The set_guest_interrupt() routine actually delivers the interrupt or * trap. The mechanics of delivering traps and interrupts to the Guest are the * same, except some traps have an "error code" which gets pushed onto the * stack as well: the caller tells us if this is one. @@ -59,7 +66,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) * * We set up the stack just like the CPU does for a real interrupt, so it's * identical for the Guest (and the standard "iret" instruction will undo - * it). */ + * it). + */ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, bool has_err) { @@ -67,20 +75,26 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, u32 eflags, ss, irq_enable; unsigned long virtstack; - /* There are two cases for interrupts: one where the Guest is already + /* + * There are two cases for interrupts: one where the Guest is already * in the kernel, and a more complex one where the Guest is in - * userspace. We check the privilege level to find out. */ + * userspace. We check the privilege level to find out. + */ if ((cpu->regs->ss&0x3) != GUEST_PL) { - /* The Guest told us their kernel stack with the SET_STACK - * hypercall: both the virtual address and the segment */ + /* + * The Guest told us their kernel stack with the SET_STACK + * hypercall: both the virtual address and the segment. 
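The idt_address(), idt_type() and idt_present() helpers above just pull fields out of the two 32-bit words of an x86 gate descriptor. Here they are exercised in a tiny stand-alone program with a made-up descriptor, so the bit positions are easy to see:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;

static unsigned long idt_address(u32 lo, u32 hi)
{
        return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

static int idt_type(u32 lo, u32 hi)
{
        return (hi >> 8) & 0xF;         /* 4-bit gate type */
}

static bool idt_present(u32 lo, u32 hi)
{
        return (hi & 0x8000);           /* P bit */
}

int main(void)
{
        /* Made-up gate: selector 0x0010, handler 0xc0101234, trap gate (0xF), present. */
        u32 lo = (0x0010u << 16) | 0x1234;
        u32 hi = 0xc0100000u | 0x8F00;

        printf("handler %#lx type %#x present %d\n",
               idt_address(lo, hi), idt_type(lo, hi), idt_present(lo, hi));
        return 0;
}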
+ */ virtstack = cpu->esp1; ss = cpu->ss1; origstack = gstack = guest_pa(cpu, virtstack); - /* We push the old stack segment and pointer onto the new + /* + * We push the old stack segment and pointer onto the new * stack: when the Guest does an "iret" back from the interrupt * handler the CPU will notice they're dropping privilege - * levels and expect these here. */ + * levels and expect these here. + */ push_guest_stack(cpu, &gstack, cpu->regs->ss); push_guest_stack(cpu, &gstack, cpu->regs->esp); } else { @@ -91,18 +105,22 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, origstack = gstack = guest_pa(cpu, virtstack); } - /* Remember that we never let the Guest actually disable interrupts, so + /* + * Remember that we never let the Guest actually disable interrupts, so * the "Interrupt Flag" bit is always set. We copy that bit from the * Guest's "irq_enabled" field into the eflags word: we saw the Guest - * copy it back in "lguest_iret". */ + * copy it back in "lguest_iret". + */ eflags = cpu->regs->eflags; if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 && !(irq_enable & X86_EFLAGS_IF)) eflags &= ~X86_EFLAGS_IF; - /* An interrupt is expected to push three things on the stack: the old + /* + * An interrupt is expected to push three things on the stack: the old * "eflags" word, the old code segment, and the old instruction - * pointer. */ + * pointer. + */ push_guest_stack(cpu, &gstack, eflags); push_guest_stack(cpu, &gstack, cpu->regs->cs); push_guest_stack(cpu, &gstack, cpu->regs->eip); @@ -111,15 +129,19 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, if (has_err) push_guest_stack(cpu, &gstack, cpu->regs->errcode); - /* Now we've pushed all the old state, we change the stack, the code - * segment and the address to execute. */ + /* + * Now we've pushed all the old state, we change the stack, the code + * segment and the address to execute. + */ cpu->regs->ss = ss; cpu->regs->esp = virtstack + (gstack - origstack); cpu->regs->cs = (__KERNEL_CS|GUEST_PL); cpu->regs->eip = idt_address(lo, hi); - /* There are two kinds of interrupt handlers: 0xE is an "interrupt - * gate" which expects interrupts to be disabled on entry. */ + /* + * There are two kinds of interrupt handlers: 0xE is an "interrupt + * gate" which expects interrupts to be disabled on entry. + */ if (idt_type(lo, hi) == 0xE) if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) kill_guest(cpu, "Disabling interrupts"); @@ -130,7 +152,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, * * interrupt_pending() returns the first pending interrupt which isn't blocked * by the Guest. It is called before every entry to the Guest, and just before - * we go to sleep when the Guest has halted itself. */ + * we go to sleep when the Guest has halted itself. + */ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) { unsigned int irq; @@ -140,8 +163,10 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) if (!cpu->lg->lguest_data) return LGUEST_IRQS; - /* Take our "irqs_pending" array and remove any interrupts the Guest - * wants blocked: the result ends up in "blk". */ + /* + * Take our "irqs_pending" array and remove any interrupts the Guest + * wants blocked: the result ends up in "blk". 
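interrupt_pending() masks the pending set against the interrupts the Guest has blocked and then picks the lowest remaining bit, returning LGUEST_IRQS when nothing is deliverable. With one word standing in for the kernel's bitmap helpers, the selection looks like this (values are invented for the demo):

#include <stdio.h>

#define DEMO_LGUEST_IRQS 32     /* doubles as the "none pending" sentinel */

static unsigned int first_pending(unsigned int pending, unsigned int blocked)
{
        unsigned int blk = pending & ~blocked;  /* same masking as "blk" above */
        unsigned int irq;

        for (irq = 0; irq < DEMO_LGUEST_IRQS; irq++)
                if (blk & (1u << irq))
                        return irq;
        return DEMO_LGUEST_IRQS;                /* nothing deliverable */
}

int main(void)
{
        /* IRQs 1 and 3 pending, but the Guest has blocked IRQ 1. */
        printf("deliver irq %u\n", first_pending(0x0A, 0x02));  /* -> 3 */
        return 0;
}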
+ */ if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, sizeof(blk))) return LGUEST_IRQS; @@ -154,16 +179,20 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) return irq; } -/* This actually diverts the Guest to running an interrupt handler, once an - * interrupt has been identified by interrupt_pending(). */ +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) { struct desc_struct *idt; BUG_ON(irq >= LGUEST_IRQS); - /* They may be in the middle of an iret, where they asked us never to - * deliver interrupts. */ + /* + * They may be in the middle of an iret, where they asked us never to + * deliver interrupts. + */ if (cpu->regs->eip >= cpu->lg->noirq_start && (cpu->regs->eip < cpu->lg->noirq_end)) return; @@ -187,29 +216,37 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) } } - /* Look at the IDT entry the Guest gave us for this interrupt. The + /* + * Look at the IDT entry the Guest gave us for this interrupt. The * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip - * over them. */ + * over them. + */ idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; /* If they don't have a handler (yet?), we just ignore it */ if (idt_present(idt->a, idt->b)) { /* OK, mark it no longer pending and deliver it. */ clear_bit(irq, cpu->irqs_pending); - /* set_guest_interrupt() takes the interrupt descriptor and a + /* + * set_guest_interrupt() takes the interrupt descriptor and a * flag to say whether this interrupt pushes an error code onto - * the stack as well: virtual interrupts never do. */ + * the stack as well: virtual interrupts never do. + */ set_guest_interrupt(cpu, idt->a, idt->b, false); } - /* Every time we deliver an interrupt, we update the timestamp in the + /* + * Every time we deliver an interrupt, we update the timestamp in the * Guest's lguest_data struct. It would be better for the Guest if we * did this more often, but it can actually be quite slow: doing it * here is a compromise which means at least it gets updated every - * timer interrupt. */ + * timer interrupt. + */ write_timestamp(cpu); - /* If there are no other interrupts we want to deliver, clear - * the pending flag. */ + /* + * If there are no other interrupts we want to deliver, clear + * the pending flag. + */ if (!more) put_user(0, &cpu->lg->lguest_data->irq_pending); } @@ -217,24 +254,29 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) /* And this is the routine when we want to set an interrupt for the Guest. */ void set_interrupt(struct lg_cpu *cpu, unsigned int irq) { - /* Next time the Guest runs, the core code will see if it can deliver - * this interrupt. */ + /* + * Next time the Guest runs, the core code will see if it can deliver + * this interrupt. + */ set_bit(irq, cpu->irqs_pending); - /* Make sure it sees it; it might be asleep (eg. halted), or - * running the Guest right now, in which case kick_process() - * will knock it out. */ + /* + * Make sure it sees it; it might be asleep (eg. halted), or running + * the Guest right now, in which case kick_process() will knock it out. + */ if (!wake_up_process(cpu->tsk)) kick_process(cpu->tsk); } /*:*/ -/* Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent +/* + * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent * me a patch, so we support that too. 
It'd be a big step for lguest if half * the Plan 9 user base were to start using it. * * Actually now I think of it, it's possible that Ron *is* half the Plan 9 - * userbase. Oh well. */ + * userbase. Oh well. + */ static bool could_be_syscall(unsigned int num) { /* Normal Linux SYSCALL_VECTOR or reserved vector? */ @@ -274,9 +316,11 @@ void free_interrupts(void) clear_bit(syscall_vector, used_vectors); } -/*H:220 Now we've got the routines to deliver interrupts, delivering traps like +/*H:220 + * Now we've got the routines to deliver interrupts, delivering traps like * page fault is easy. The only trick is that Intel decided that some traps - * should have error codes: */ + * should have error codes: + */ static bool has_err(unsigned int trap) { return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); @@ -285,13 +329,17 @@ static bool has_err(unsigned int trap) /* deliver_trap() returns true if it could deliver the trap. */ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) { - /* Trap numbers are always 8 bit, but we set an impossible trap number - * for traps inside the Switcher, so check that here. */ + /* + * Trap numbers are always 8 bit, but we set an impossible trap number + * for traps inside the Switcher, so check that here. + */ if (num >= ARRAY_SIZE(cpu->arch.idt)) return false; - /* Early on the Guest hasn't set the IDT entries (or maybe it put a - * bogus one in): if we fail here, the Guest will be killed. */ + /* + * Early on the Guest hasn't set the IDT entries (or maybe it put a + * bogus one in): if we fail here, the Guest will be killed. + */ if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) return false; set_guest_interrupt(cpu, cpu->arch.idt[num].a, @@ -299,7 +347,8 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) return true; } -/*H:250 Here's the hard part: returning to the Host every time a trap happens +/*H:250 + * Here's the hard part: returning to the Host every time a trap happens * and then calling deliver_trap() and re-entering the Guest is slow. * Particularly because Guest userspace system calls are traps (usually trap * 128). @@ -311,69 +360,87 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) * the other hypervisors would beat it up at lunchtime. * * This routine indicates if a particular trap number could be delivered - * directly. */ + * directly. + */ static bool direct_trap(unsigned int num) { - /* Hardware interrupts don't go to the Guest at all (except system - * call). */ + /* + * Hardware interrupts don't go to the Guest at all (except system + * call). + */ if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) return false; - /* The Host needs to see page faults (for shadow paging and to save the + /* + * The Host needs to see page faults (for shadow paging and to save the * fault address), general protection faults (in/out emulation) and * device not available (TS handling), invalid opcode fault (kvm hcall), - * and of course, the hypercall trap. */ + * and of course, the hypercall trap. + */ return num != 14 && num != 13 && num != 7 && num != 6 && num != LGUEST_TRAP_ENTRY; } /*:*/ -/*M:005 The Guest has the ability to turn its interrupt gates into trap gates, +/*M:005 + * The Guest has the ability to turn its interrupt gates into trap gates, * if it is careful. The Host will let trap gates can go directly to the * Guest, but the Guest needs the interrupts atomically disabled for an * interrupt gate. 
It can do this by pointing the trap gate at instructions - * within noirq_start and noirq_end, where it can safely disable interrupts. */ + * within noirq_start and noirq_end, where it can safely disable interrupts. + */ -/*M:006 The Guests do not use the sysenter (fast system call) instruction, +/*M:006 + * The Guests do not use the sysenter (fast system call) instruction, * because it's hardcoded to enter privilege level 0 and so can't go direct. * It's about twice as fast as the older "int 0x80" system call, so it might * still be worthwhile to handle it in the Switcher and lcall down to the * Guest. The sysenter semantics are hairy tho: search for that keyword in - * entry.S :*/ + * entry.S +:*/ -/*H:260 When we make traps go directly into the Guest, we need to make sure +/*H:260 + * When we make traps go directly into the Guest, we need to make sure * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the * CPU trying to deliver the trap will fault while trying to push the interrupt * words on the stack: this is called a double fault, and it forces us to kill * the Guest. * - * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */ + * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. + */ void pin_stack_pages(struct lg_cpu *cpu) { unsigned int i; - /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or - * two pages of stack space. */ + /* + * Depending on the CONFIG_4KSTACKS option, the Guest can have one or + * two pages of stack space. + */ for (i = 0; i < cpu->lg->stack_pages; i++) - /* The stack grows *upwards*, so the address we're given is the + /* + * The stack grows *upwards*, so the address we're given is the * start of the page after the kernel stack. Subtract one to * get back onto the first stack page, and keep subtracting to - * get to the rest of the stack pages. */ + * get to the rest of the stack pages. + */ pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); } -/* Direct traps also mean that we need to know whenever the Guest wants to use +/* + * Direct traps also mean that we need to know whenever the Guest wants to use * a different kernel stack, so we can change the IDT entries to use that * stack. The IDT entries expect a virtual address, so unlike most addresses * the Guest gives us, the "esp" (stack pointer) value here is virtual, not * physical. * * In Linux each process has its own kernel stack, so this happens a lot: we - * change stacks on each context switch. */ + * change stacks on each context switch. + */ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) { - /* You are not allowed have a stack segment with privilege level 0: bad - * Guest! */ + /* + * You're not allowed a stack segment with privilege level 0: bad Guest! + */ if ((seg & 0x3) != GUEST_PL) kill_guest(cpu, "bad stack segment %i", seg); /* We only expect one or two stack pages. */ @@ -387,11 +454,15 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) pin_stack_pages(cpu); } -/* All this reference to mapping stacks leads us neatly into the other complex - * part of the Host: page table handling. */ +/* + * All this reference to mapping stacks leads us neatly into the other complex + * part of the Host: page table handling. 
+ */ -/*H:235 This is the routine which actually checks the Guest's IDT entry and - * transfers it into the entry in "struct lguest": */ +/*H:235 + * This is the routine which actually checks the Guest's IDT entry and + * transfers it into the entry in "struct lguest": + */ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, unsigned int num, u32 lo, u32 hi) { @@ -407,30 +478,38 @@ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, if (type != 0xE && type != 0xF) kill_guest(cpu, "bad IDT type %i", type); - /* We only copy the handler address, present bit, privilege level and + /* + * We only copy the handler address, present bit, privilege level and * type. The privilege level controls where the trap can be triggered * manually with an "int" instruction. This is usually GUEST_PL, - * except for system calls which userspace can use. */ + * except for system calls which userspace can use. + */ trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); trap->b = (hi&0xFFFFEF00); } -/*H:230 While we're here, dealing with delivering traps and interrupts to the +/*H:230 + * While we're here, dealing with delivering traps and interrupts to the * Guest, we might as well complete the picture: how the Guest tells us where * it wants them to go. This would be simple, except making traps fast * requires some tricks. * * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the - * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ + * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. + */ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) { - /* Guest never handles: NMI, doublefault, spurious interrupt or - * hypercall. We ignore when it tries to set them. */ + /* + * Guest never handles: NMI, doublefault, spurious interrupt or + * hypercall. We ignore when it tries to set them. + */ if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) return; - /* Mark the IDT as changed: next time the Guest runs we'll know we have - * to copy this again. */ + /* + * Mark the IDT as changed: next time the Guest runs we'll know we have + * to copy this again. + */ cpu->changed |= CHANGED_IDT; /* Check that the Guest doesn't try to step outside the bounds. */ @@ -440,9 +519,11 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); } -/* The default entry for each interrupt points into the Switcher routines which +/* + * The default entry for each interrupt points into the Switcher routines which * simply return to the Host. The run_guest() loop will then call - * deliver_trap() to bounce it back into the Guest. */ + * deliver_trap() to bounce it back into the Guest. + */ static void default_idt_entry(struct desc_struct *idt, int trap, const unsigned long handler, @@ -451,13 +532,17 @@ static void default_idt_entry(struct desc_struct *idt, /* A present interrupt gate. */ u32 flags = 0x8e00; - /* Set the privilege level on the entry for the hypercall: this allows - * the Guest to use the "int" instruction to trigger it. */ + /* + * Set the privilege level on the entry for the hypercall: this allows + * the Guest to use the "int" instruction to trigger it. + */ if (trap == LGUEST_TRAP_ENTRY) flags |= (GUEST_PL << 13); else if (base) - /* Copy priv. level from what Guest asked for. This allows - * debug (int 3) traps from Guest userspace, for example. */ + /* + * Copy privilege level from what Guest asked for. 
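set_trap() and default_idt_entry() both end by packing a selector, the handler offset and a flags word into the a/b halves of an IDT entry. A small demonstration of that packing; the selector value and handler address are arbitrary, only the layout matters.

#include <stdio.h>

typedef unsigned int u32;

#define DEMO_KERNEL_CS  0x10    /* arbitrary code segment selector */
#define DEMO_GUEST_PL   1       /* lguest runs the Guest at ring 1 */

struct demo_gate {
        u32 a, b;
};

/* Pack handler/flags the way the IDT expects: offset split across a and b. */
static void pack_gate(struct demo_gate *g, unsigned long handler, u32 flags)
{
        g->a = ((DEMO_KERNEL_CS | DEMO_GUEST_PL) << 16) | (handler & 0x0000FFFF);
        g->b = (handler & 0xFFFF0000) | flags;
}

int main(void)
{
        struct demo_gate g;

        pack_gate(&g, 0xc0101234UL, 0x8e00);    /* present interrupt gate */
        printf("a=%#x b=%#x\n", g.a, g.b);      /* a=0x111234 b=0xc0108e00 */
        return 0;
}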
This allows + * debug (int 3) traps from Guest userspace, for example. + */ flags |= (base->b & 0x6000); /* Now pack it into the IDT entry in its weird format. */ @@ -475,16 +560,20 @@ void setup_default_idt_entries(struct lguest_ro_state *state, default_idt_entry(&state->guest_idt[i], i, def[i], NULL); } -/*H:240 We don't use the IDT entries in the "struct lguest" directly, instead +/*H:240 + * We don't use the IDT entries in the "struct lguest" directly, instead * we copy them into the IDT which we've set up for Guests on this CPU, just - * before we run the Guest. This routine does that copy. */ + * before we run the Guest. This routine does that copy. + */ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, const unsigned long *def) { unsigned int i; - /* We can simply copy the direct traps, otherwise we use the default - * ones in the Switcher: they will return to the Host. */ + /* + * We can simply copy the direct traps, otherwise we use the default + * ones in the Switcher: they will return to the Host. + */ for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { const struct desc_struct *gidt = &cpu->arch.idt[i]; @@ -492,14 +581,16 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, if (!direct_trap(i)) continue; - /* Only trap gates (type 15) can go direct to the Guest. + /* + * Only trap gates (type 15) can go direct to the Guest. * Interrupt gates (type 14) disable interrupts as they are * entered, which we never let the Guest do. Not present * entries (type 0x0) also can't go direct, of course. * * If it can't go direct, we still need to copy the priv. level: * they might want to give userspace access to a software - * interrupt. */ + * interrupt. + */ if (idt_type(gidt->a, gidt->b) == 0xF) idt[i] = *gidt; else @@ -518,7 +609,8 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, * the next timer interrupt (in nanoseconds). We use the high-resolution timer * infrastructure to set a callback at that time. * - * 0 means "turn off the clock". */ + * 0 means "turn off the clock". + */ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) { ktime_t expires; @@ -529,9 +621,11 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) return; } - /* We use wallclock time here, so the Guest might not be running for + /* + * We use wallclock time here, so the Guest might not be running for * all the time between now and the timer interrupt it asked for. This - * is almost always the right thing to do. */ + * is almost always the right thing to do. + */ expires = ktime_add_ns(ktime_get_real(), delta); hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); } diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index d4e8979735c..bc28745d05a 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -16,15 +16,13 @@ void free_pagetables(void); int init_pagetables(struct page **switcher_page, unsigned int pages); -struct pgdir -{ +struct pgdir { unsigned long gpgdir; pgd_t *pgdir; }; /* We have two pages shared with guests, per cpu. 
*/ -struct lguest_pages -{ +struct lguest_pages { /* This is the stack page mapped rw in guest */ char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; struct lguest_regs regs; @@ -38,8 +36,6 @@ struct lguest_pages #define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */ #define CHANGED_ALL 3 -struct lguest; - struct lg_cpu { unsigned int id; struct lguest *lg; @@ -56,13 +52,13 @@ struct lg_cpu { unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ - /* At end of a page shared mapped over lguest_pages in guest. */ + /* At end of a page shared mapped over lguest_pages in guest. */ unsigned long regs_page; struct lguest_regs *regs; struct lguest_pages *last_pages; - int cpu_pgd; /* which pgd this cpu is currently using */ + int cpu_pgd; /* Which pgd this cpu is currently using */ /* If a hypercall was asked for, this points to the arguments. */ struct hcall_args *hcall; @@ -82,7 +78,7 @@ struct lg_cpu { struct lg_eventfd { unsigned long addr; - struct file *event; + struct eventfd_ctx *event; }; struct lg_eventfd_map { @@ -91,15 +87,17 @@ struct lg_eventfd_map { }; /* The private info the thread maintains about the guest. */ -struct lguest -{ +struct lguest { struct lguest_data __user *lguest_data; struct lg_cpu cpus[NR_CPUS]; unsigned int nr_cpus; u32 pfn_limit; - /* This provides the offset to the base of guest-physical - * memory in the Launcher. */ + + /* + * This provides the offset to the base of guest-physical memory in the + * Launcher. + */ void __user *mem_base; unsigned long kernel_address; @@ -124,11 +122,13 @@ bool lguest_address_ok(const struct lguest *lg, void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); -/*H:035 Using memory-copy operations like that is usually inconvient, so we +/*H:035 + * Using memory-copy operations like that is usually inconvient, so we * have the following helper macros which read and write a specific type (often * an unsigned long). * - * This reads into a variable of the given type then returns that. */ + * This reads into a variable of the given type then returns that. + */ #define lgread(cpu, addr, type) \ ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) @@ -142,9 +142,11 @@ void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); int run_guest(struct lg_cpu *cpu, unsigned long __user *user); -/* Helper macros to obtain the first 12 or the last 20 bits, this is only the +/* + * Helper macros to obtain the first 12 or the last 20 bits, this is only the * first step in the migration to the kernel types. pte_pfn is already defined - * in the kernel. */ + * in the kernel. + */ #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index e082cdac88b..b6200bc39b5 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -1,10 +1,12 @@ -/*P:050 Lguest guests use a very simple method to describe devices. It's a +/*P:050 + * Lguest guests use a very simple method to describe devices. It's a * series of device descriptors contained just above the top of normal Guest * memory. * * We use the standard "virtio" device infrastructure, which provides us with a * console, a network and a block driver. Each one expects some configuration - * information and a "virtqueue" or two to send and receive data. 
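The lgread() macro in lg.h relies on a GCC statement expression so a typed Guest read can sit inside an ordinary expression. The same trick in isolation, with a fake __lgread() that just memcpy()s from a local buffer so the sketch is self-contained:

#include <stdio.h>
#include <string.h>

static unsigned char guest_mem[64] = { 0x78, 0x56, 0x34, 0x12 };

/* Stand-in for __lgread(): copy 'bytes' from a fake Guest address. */
static void fake_lgread(void *b, unsigned long addr, unsigned bytes)
{
        memcpy(b, guest_mem + addr, bytes);
}

/* Same shape as lgread(): a GNU statement expression yielding a typed value. */
#define demo_lgread(addr, type) \
        ({ type _v; fake_lgread(&_v, (addr), sizeof(_v)); _v; })

int main(void)
{
        unsigned int v = demo_lgread(0, unsigned int);

        printf("read %#x\n", v);        /* 0x12345678 on a little-endian box */
        return 0;
}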
:*/ + * information and a "virtqueue" or two to send and receive data. +:*/ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/lguest_launcher.h> @@ -20,8 +22,10 @@ /* The pointer to our (page) of device descriptions. */ static void *lguest_devices; -/* For Guests, device memory can be used as normal memory, so we cast away the - * __iomem to quieten sparse. */ +/* + * For Guests, device memory can be used as normal memory, so we cast away the + * __iomem to quieten sparse. + */ static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) { return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); @@ -32,8 +36,10 @@ static inline void lguest_unmap(void *addr) iounmap((__force void __iomem *)addr); } -/*D:100 Each lguest device is just a virtio device plus a pointer to its entry - * in the lguest_devices page. */ +/*D:100 + * Each lguest device is just a virtio device plus a pointer to its entry + * in the lguest_devices page. + */ struct lguest_device { struct virtio_device vdev; @@ -41,9 +47,11 @@ struct lguest_device { struct lguest_device_desc *desc; }; -/* Since the virtio infrastructure hands us a pointer to the virtio_device all +/* + * Since the virtio infrastructure hands us a pointer to the virtio_device all * the time, it helps to have a curt macro to get a pointer to the struct - * lguest_device it's enclosed in. */ + * lguest_device it's enclosed in. + */ #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) /*D:130 @@ -55,7 +63,8 @@ struct lguest_device { * the driver will look at them during setup. * * A convenient routine to return the device's virtqueue config array: - * immediately after the descriptor. */ + * immediately after the descriptor. + */ static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) { return (void *)(desc + 1); @@ -98,10 +107,12 @@ static u32 lg_get_features(struct virtio_device *vdev) return features; } -/* The virtio core takes the features the Host offers, and copies the - * ones supported by the driver into the vdev->features array. Once - * that's all sorted out, this routine is called so we can tell the - * Host which features we understand and accept. */ +/* + * The virtio core takes the features the Host offers, and copies the ones + * supported by the driver into the vdev->features array. Once that's all + * sorted out, this routine is called so we can tell the Host which features we + * understand and accept. + */ static void lg_finalize_features(struct virtio_device *vdev) { unsigned int i, bits; @@ -112,10 +123,11 @@ static void lg_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - /* The vdev->feature array is a Linux bitmask: this isn't the - * same as a the simple array of bits used by lguest devices - * for features. So we do this slow, manual conversion which is - * completely general. */ + /* + * The vdev->feature array is a Linux bitmask: this isn't the same as a + * the simple array of bits used by lguest devices for features. So we + * do this slow, manual conversion which is completely general. + */ memset(out_features, 0, desc->feature_len); bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; for (i = 0; i < bits; i++) { @@ -146,15 +158,19 @@ static void lg_set(struct virtio_device *vdev, unsigned int offset, memcpy(lg_config(desc) + offset, buf, len); } -/* The operations to get and set the status word just access the status field - * of the device descriptor. 
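lg_finalize_features() has to translate the Linux-style feature bitmap into the packed byte array the device descriptor uses, one bit at a time. The conversion on its own, with a toy 32-bit feature word standing in for vdev->features:

#include <stdio.h>

int main(void)
{
        unsigned int features = (1u << 0) | (1u << 9);  /* toy feature bits 0 and 9 */
        unsigned char out[4] = { 0 };
        unsigned int i, bits = sizeof(features) * 8;

        /* Bit i of the bitmap becomes bit (i % 8) of byte (i / 8). */
        for (i = 0; i < bits; i++)
                if (features & (1u << i))
                        out[i / 8] |= 1 << (i % 8);

        printf("out[0]=%#x out[1]=%#x\n", out[0], out[1]);      /* 0x1 and 0x2 */
        return 0;
}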
*/ +/* + * The operations to get and set the status word just access the status field + * of the device descriptor. + */ static u8 lg_get_status(struct virtio_device *vdev) { return to_lgdev(vdev)->desc->status; } -/* To notify on status updates, we (ab)use the NOTIFY hypercall, with the - * descriptor address of the device. A zero status means "reset". */ +/* + * To notify on status updates, we (ab)use the NOTIFY hypercall, with the + * descriptor address of the device. A zero status means "reset". + */ static void set_status(struct virtio_device *vdev, u8 status) { unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; @@ -191,8 +207,7 @@ static void lg_reset(struct virtio_device *vdev) */ /*D:140 This is the information we remember about each virtqueue. */ -struct lguest_vq_info -{ +struct lguest_vq_info { /* A copy of the information contained in the device config. */ struct lguest_vqconfig config; @@ -200,13 +215,17 @@ struct lguest_vq_info void *pages; }; -/* When the virtio_ring code wants to prod the Host, it calls us here and we +/* + * When the virtio_ring code wants to prod the Host, it calls us here and we * make a hypercall. We hand the physical address of the virtqueue so the Host - * knows which virtqueue we're talking about. */ + * knows which virtqueue we're talking about. + */ static void lg_notify(struct virtqueue *vq) { - /* We store our virtqueue information in the "priv" pointer of the - * virtqueue structure. */ + /* + * We store our virtqueue information in the "priv" pointer of the + * virtqueue structure. + */ struct lguest_vq_info *lvq = vq->priv; kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); @@ -215,7 +234,8 @@ static void lg_notify(struct virtqueue *vq) /* An extern declaration inside a C file is bad form. Don't do it. */ extern void lguest_setup_irq(unsigned int irq); -/* This routine finds the first virtqueue described in the configuration of +/* + * This routine finds the Nth virtqueue described in the configuration of * this device and sets it up. * * This is kind of an ugly duckling. It'd be nicer to have a standard @@ -223,9 +243,7 @@ extern void lguest_setup_irq(unsigned int irq); * everyone wants to do it differently. The KVM coders want the Guest to * allocate its own pages and tell the Host where they are, but for lguest it's * simpler for the Host to simply tell us where the pages are. - * - * So we provide drivers with a "find the Nth virtqueue and set it up" - * function. */ + */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), @@ -244,9 +262,11 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, if (!lvq) return ERR_PTR(-ENOMEM); - /* Make a copy of the "struct lguest_vqconfig" entry, which sits after + /* + * Make a copy of the "struct lguest_vqconfig" entry, which sits after * the descriptor. We need a copy because the config space might not - * be aligned correctly. */ + * be aligned correctly. + */ memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); printk("Mapping virtqueue %i addr %lx\n", index, @@ -261,8 +281,10 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, goto free_lvq; } - /* OK, tell virtio_ring.c to set up a virtqueue now we know its size - * and we've got a pointer to its pages. */ + /* + * OK, tell virtio_ring.c to set up a virtqueue now we know its size + * and we've got a pointer to its pages. 
+ */ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev, lvq->pages, lg_notify, callback, name); if (!vq) { @@ -273,18 +295,23 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, /* Make sure the interrupt is allocated. */ lguest_setup_irq(lvq->config.irq); - /* Tell the interrupt for this virtqueue to go to the virtio_ring - * interrupt handler. */ - /* FIXME: We used to have a flag for the Host to tell us we could use + /* + * Tell the interrupt for this virtqueue to go to the virtio_ring + * interrupt handler. + * + * FIXME: We used to have a flag for the Host to tell us we could use * the interrupt as a source of randomness: it'd be nice to have that - * back.. */ + * back. + */ err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vq); if (err) goto destroy_vring; - /* Last of all we hook up our 'struct lguest_vq_info" to the - * virtqueue's priv pointer. */ + /* + * Last of all we hook up our 'struct lguest_vq_info" to the + * virtqueue's priv pointer. + */ vq->priv = lvq; return vq; @@ -358,11 +385,14 @@ static struct virtio_config_ops lguest_config_ops = { .del_vqs = lg_del_vqs, }; -/* The root device for the lguest virtio devices. This makes them appear as - * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */ +/* + * The root device for the lguest virtio devices. This makes them appear as + * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. + */ static struct device *lguest_root; -/*D:120 This is the core of the lguest bus: actually adding a new device. +/*D:120 + * This is the core of the lguest bus: actually adding a new device. * It's a separate function because it's neater that way, and because an * earlier version of the code supported hotplug and unplug. They were removed * early on because they were never used. @@ -371,14 +401,14 @@ static struct device *lguest_root; * * It's worth reading this carefully: we start with a pointer to the new device * descriptor in the "lguest_devices" page, and the offset into the device - * descriptor page so we can uniquely identify it if things go badly wrong. */ + * descriptor page so we can uniquely identify it if things go badly wrong. + */ static void add_lguest_device(struct lguest_device_desc *d, unsigned int offset) { struct lguest_device *ldev; - /* Start with zeroed memory; Linux's device layer seems to count on - * it. */ + /* Start with zeroed memory; Linux's device layer counts on it. */ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", @@ -388,17 +418,25 @@ static void add_lguest_device(struct lguest_device_desc *d, /* This devices' parent is the lguest/ dir. */ ldev->vdev.dev.parent = lguest_root; - /* We have a unique device index thanks to the dev_index counter. */ + /* + * The device type comes straight from the descriptor. There's also a + * device vendor field in the virtio_device struct, which we leave as + * 0. + */ ldev->vdev.id.device = d->type; - /* We have a simple set of routines for querying the device's - * configuration information and setting its status. */ + /* + * We have a simple set of routines for querying the device's + * configuration information and setting its status. + */ ldev->vdev.config = &lguest_config_ops; /* And we remember the device's descriptor for lguest_config_ops. 
*/ ldev->desc = d; - /* register_virtio_device() sets up the generic fields for the struct + /* + * register_virtio_device() sets up the generic fields for the struct * virtio_device and calls device_register(). This makes the bus - * infrastructure look for a matching driver. */ + * infrastructure look for a matching driver. + */ if (register_virtio_device(&ldev->vdev) != 0) { printk(KERN_ERR "Failed to register lguest dev %u type %u\n", offset, d->type); @@ -406,8 +444,10 @@ static void add_lguest_device(struct lguest_device_desc *d, } } -/*D:110 scan_devices() simply iterates through the device page. The type 0 is - * reserved to mean "end of devices". */ +/*D:110 + * scan_devices() simply iterates through the device page. The type 0 is + * reserved to mean "end of devices". + */ static void scan_devices(void) { unsigned int i; @@ -426,7 +466,8 @@ static void scan_devices(void) } } -/*D:105 Fairly early in boot, lguest_devices_init() is called to set up the +/*D:105 + * Fairly early in boot, lguest_devices_init() is called to set up the * lguest device infrastructure. We check that we are a Guest by checking * pv_info.name: there are other ways of checking, but this seems most * obvious to me. @@ -437,7 +478,8 @@ static void scan_devices(void) * correct sysfs incantation). * * Finally we call scan_devices() which adds all the devices found in the - * lguest_devices page. */ + * lguest_devices page. + */ static int __init lguest_devices_init(void) { if (strcmp(pv_info.name, "lguest") != 0) @@ -456,11 +498,13 @@ static int __init lguest_devices_init(void) /* We do this after core stuff, but before the drivers. */ postcore_initcall(lguest_devices_init); -/*D:150 At this point in the journey we used to now wade through the lguest +/*D:150 + * At this point in the journey we used to now wade through the lguest * devices themselves: net, block and console. Since they're all now virtio * devices rather than lguest-specific, I've decided to ignore them. Mostly, * they're kind of boring. But this does mean you'll never experience the * thrill of reading the forbidden love scene buried deep in the block driver. * * "make Launcher" beckons, where we answer questions like "Where do Guests - * come from?", and "What do you do when someone asks for optimization?". */ + * come from?", and "What do you do when someone asks for optimization?". + */ diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 32e29712105..b4d3f7ca554 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -1,8 +1,9 @@ /*P:200 This contains all the /dev/lguest code, whereby the userspace launcher * controls and communicates with the Guest. For example, the first write will - * tell us the Guest's memory layout, pagetable, entry point and kernel address - * offset. A read will run the Guest until something happens, such as a signal - * or the Guest doing a NOTIFY out to the Launcher. :*/ + * tell us the Guest's memory layout and entry point. A read will run the + * Guest until something happens, such as a signal or the Guest doing a NOTIFY + * out to the Launcher. +:*/ #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/fs.h> @@ -11,14 +12,41 @@ #include <linux/file.h> #include "lg.h" +/*L:056 + * Before we move on, let's jump ahead and look at what the kernel does when + * it needs to look up the eventfds. That will complete our picture of how we + * use RCU. + * + * The notification value is in cpu->pending_notify: we return true if it went + * to an eventfd. 
+ */ bool send_notify_to_eventfd(struct lg_cpu *cpu) { unsigned int i; struct lg_eventfd_map *map; - /* lg->eventfds is RCU-protected */ + /* + * This "rcu_read_lock()" helps track when someone is still looking at + * the (RCU-using) eventfds array. It's not actually a lock at all; + * indeed it's a noop in many configurations. (You didn't expect me to + * explain all the RCU secrets here, did you?) + */ rcu_read_lock(); + /* + * rcu_dereference is the counter-side of rcu_assign_pointer(); it + * makes sure we don't access the memory pointed to by + * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, + * but Alpha allows this! Paul McKenney points out that a really + * aggressive compiler could have the same effect: + * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html + * + * So play safe, use rcu_dereference to get the rcu-protected pointer: + */ map = rcu_dereference(cpu->lg->eventfds); + /* + * Simple array search: even if they add an eventfd while we do this, + * we'll continue to use the old array and just won't see the new one. + */ for (i = 0; i < map->num; i++) { if (map->map[i].addr == cpu->pending_notify) { eventfd_signal(map->map[i].event, 1); @@ -26,19 +54,50 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) break; } } + /* We're done with the rcu-protected variable cpu->lg->eventfds. */ rcu_read_unlock(); + + /* If we cleared the notification, it's because we found a match. */ return cpu->pending_notify == 0; } +/*L:055 + * One of the more tricksy tricks in the Linux Kernel is a technique called + * Read Copy Update. Since one point of lguest is to teach lguest journeyers + * about kernel coding, I use it here. (In case you're curious, other purposes + * include learning about virtualization and instilling a deep appreciation for + * simplicity and puppies). + * + * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we + * add new eventfds without ever blocking readers from accessing the array. + * The current Launcher only does this during boot, so that never happens. But + * Read Copy Update is cool, and adding a lock risks damaging even more puppies + * than this code does. + * + * We allocate a brand new one-larger array, copy the old one and add our new + * element. Then we make the lg eventfd pointer point to the new array. + * That's the easy part: now we need to free the old one, but we need to make + * sure no slow CPU somewhere is still looking at it. That's what + * synchronize_rcu does for us: waits until every CPU has indicated that it has + * moved on to know it's no longer using the old one. + * + * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. + */ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) { struct lg_eventfd_map *new, *old = lg->eventfds; + /* + * We don't allow notifications on value 0 anyway (pending_notify of + * 0 means "nothing pending"). + */ if (!addr) return -EINVAL; - /* Replace the old array with the new one, carefully: others can - * be accessing it at the same time */ + /* + * Replace the old array with the new one, carefully: others can + * be accessing it at the same time. + */ new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), GFP_KERNEL); if (!new) @@ -50,24 +109,43 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) /* Now append new entry. 
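/*
 * An illustrative userspace sketch (not from this patch) of the same
 * publish/read idea, using C11 atomics where the code above uses
 * rcu_assign_pointer()/rcu_dereference().  The names are invented, and
 * the hard part -- safely freeing the old array, which is what
 * synchronize_rcu() exists for -- is deliberately left out.
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct toy_map {
	unsigned int num;
	unsigned long addr[];		/* grows by one on every add */
};

static _Atomic(struct toy_map *) toy_current;

/* Reader: an acquire load plays the role of rcu_dereference(), so the
 * array contents written before publication are visible to us. */
static int toy_lookup(unsigned long addr)
{
	struct toy_map *m = atomic_load_explicit(&toy_current,
						 memory_order_acquire);
	if (!m)
		return 0;
	for (unsigned int i = 0; i < m->num; i++)
		if (m->addr[i] == addr)
			return 1;
	return 0;
}

/* Writer: copy, extend, then publish with a release store, which is the
 * ordering rcu_assign_pointer() provides.  The old array is simply leaked
 * here; the kernel frees it only after synchronize_rcu() says no reader
 * can still see it. */
static int toy_add(unsigned long addr)
{
	struct toy_map *old = atomic_load_explicit(&toy_current,
						   memory_order_relaxed);
	unsigned int oldnum = old ? old->num : 0;
	struct toy_map *new = malloc(sizeof(*new) +
				     (oldnum + 1) * sizeof(new->addr[0]));

	if (!new)
		return -1;
	if (old)
		memcpy(new->addr, old->addr, oldnum * sizeof(new->addr[0]));
	new->addr[oldnum] = addr;
	new->num = oldnum + 1;
	atomic_store_explicit(&toy_current, new, memory_order_release);
	return 0;
}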
*/ new->map[new->num].addr = addr; - new->map[new->num].event = eventfd_fget(fd); + new->map[new->num].event = eventfd_ctx_fdget(fd); if (IS_ERR(new->map[new->num].event)) { + int err = PTR_ERR(new->map[new->num].event); kfree(new); - return PTR_ERR(new->map[new->num].event); + return err; } new->num++; - /* Now put new one in place. */ + /* + * Now put new one in place: rcu_assign_pointer() is a fancy way of + * doing "lg->eventfds = new", but it uses memory barriers to make + * absolutely sure that the contents of "new" written above is nailed + * down before we actually do the assignment. + * + * We have to think about these kinds of things when we're operating on + * live data without locks. + */ rcu_assign_pointer(lg->eventfds, new); - /* We're not in a big hurry. Wait until noone's looking at old - * version, then delete it. */ + /* + * We're not in a big hurry. Wait until noone's looking at old + * version, then free it. + */ synchronize_rcu(); kfree(old); return 0; } +/*L:052 + * Receiving notifications from the Guest is usually done by attaching a + * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will + * become readable when the Guest does an LHCALL_NOTIFY with that value. + * + * This is really convenient for processing each virtqueue in a separate + * thread. + */ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) { unsigned long addr, fd; @@ -79,15 +157,22 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) if (get_user(fd, input) != 0) return -EFAULT; + /* + * Just make sure two callers don't add eventfds at once. We really + * only need to lock against callers adding to the same Guest, so using + * the Big Lguest Lock is overkill. But this is setup, not a fast path. + */ mutex_lock(&lguest_lock); err = add_eventfd(lg, addr, fd); mutex_unlock(&lguest_lock); - return 0; + return err; } -/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt - * number to /dev/lguest. */ +/*L:050 + * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt + * number to /dev/lguest. + */ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) { unsigned long irq; @@ -97,12 +182,18 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) if (irq >= LGUEST_IRQS) return -EINVAL; + /* + * Next time the Guest runs, the core code will see if it can deliver + * this interrupt. + */ set_interrupt(cpu, irq); return 0; } -/*L:040 Once our Guest is initialized, the Launcher makes it run by reading - * from /dev/lguest. */ +/*L:040 + * Once our Guest is initialized, the Launcher makes it run by reading + * from /dev/lguest. + */ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) { struct lguest *lg = file->private_data; @@ -138,8 +229,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) return len; } - /* If we returned from read() last time because the Guest sent I/O, - * clear the flag. */ + /* + * If we returned from read() last time because the Guest sent I/O, + * clear the flag. + */ if (cpu->pending_notify) cpu->pending_notify = 0; @@ -147,8 +240,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) return run_guest(cpu, (unsigned long __user *)user); } -/*L:025 This actually initializes a CPU. For the moment, a Guest is only - * uniprocessor, so "id" is always 0. */ +/*L:025 + * This actually initializes a CPU. 
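/*
 * A small userspace sketch (again, not part of the patch) of the eventfd
 * semantics the Launcher relies on: the descriptor is just a kernel
 * counter, a write adds to it, and a read blocks until it is non-zero.
 * The step of handing the fd to /dev/lguest is omitted here.
 */
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val = 1, count;
	int efd = eventfd(0, 0);	/* one of these per virtqueue */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	/* eventfd_signal(ctx, 1) in the Host boils down to adding 1 to the
	 * counter; the userspace equivalent is an 8-byte write. */
	if (write(efd, &val, sizeof(val)) != sizeof(val))
		perror("write");

	/* A per-virtqueue thread in the Launcher just blocks here until
	 * the Guest's NOTIFY arrives. */
	if (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("notified, counter was %llu\n",
		       (unsigned long long)count);

	close(efd);
	return 0;
}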
For the moment, a Guest is only + * uniprocessor, so "id" is always 0. + */ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) { /* We have a limited number the number of CPUs in the lguest struct. */ @@ -163,8 +258,10 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) /* Each CPU has a timer it can set. */ init_clockdev(cpu); - /* We need a complete page for the Guest registers: they are accessible - * to the Guest and we can only grant it access to whole pages. */ + /* + * We need a complete page for the Guest registers: they are accessible + * to the Guest and we can only grant it access to whole pages. + */ cpu->regs_page = get_zeroed_page(GFP_KERNEL); if (!cpu->regs_page) return -ENOMEM; @@ -172,29 +269,38 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) /* We actually put the registers at the bottom of the page. */ cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); - /* Now we initialize the Guest's registers, handing it the start - * address. */ + /* + * Now we initialize the Guest's registers, handing it the start + * address. + */ lguest_arch_setup_regs(cpu, start_ip); - /* We keep a pointer to the Launcher task (ie. current task) for when - * other Guests want to wake this one (eg. console input). */ + /* + * We keep a pointer to the Launcher task (ie. current task) for when + * other Guests want to wake this one (eg. console input). + */ cpu->tsk = current; - /* We need to keep a pointer to the Launcher's memory map, because if + /* + * We need to keep a pointer to the Launcher's memory map, because if * the Launcher dies we need to clean it up. If we don't keep a - * reference, it is destroyed before close() is called. */ + * reference, it is destroyed before close() is called. + */ cpu->mm = get_task_mm(cpu->tsk); - /* We remember which CPU's pages this Guest used last, for optimization - * when the same Guest runs on the same CPU twice. */ + /* + * We remember which CPU's pages this Guest used last, for optimization + * when the same Guest runs on the same CPU twice. + */ cpu->last_pages = NULL; /* No error == success. */ return 0; } -/*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit) - * values (in addition to the LHREQ_INITIALIZE value). These are: +/*L:020 + * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in + * addition to the LHREQ_INITIALIZE value). These are: * * base: The start of the Guest-physical memory inside the Launcher memory. * @@ -206,14 +312,15 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) */ static int initialize(struct file *file, const unsigned long __user *input) { - /* "struct lguest" contains everything we (the Host) know about a - * Guest. */ + /* "struct lguest" contains all we (the Host) know about a Guest. */ struct lguest *lg; int err; unsigned long args[3]; - /* We grab the Big Lguest lock, which protects against multiple - * simultaneous initializations. */ + /* + * We grab the Big Lguest lock, which protects against multiple + * simultaneous initializations. + */ mutex_lock(&lguest_lock); /* You can't initialize twice! Close the device and start again... */ if (file->private_data) { @@ -248,8 +355,10 @@ static int initialize(struct file *file, const unsigned long __user *input) if (err) goto free_eventfds; - /* Initialize the Guest's shadow page tables, using the toplevel - * address the Launcher gave us. This allocates memory, so can fail. 
*/ + /* + * Initialize the Guest's shadow page tables, using the toplevel + * address the Launcher gave us. This allocates memory, so can fail. + */ err = init_guest_pagetable(lg); if (err) goto free_regs; @@ -274,20 +383,24 @@ unlock: return err; } -/*L:010 The first operation the Launcher does must be a write. All writes +/*L:010 + * The first operation the Launcher does must be a write. All writes * start with an unsigned long number: for the first write this must be * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use - * writes of other values to send interrupts. + * writes of other values to send interrupts or set up receipt of notifications. * * Note that we overload the "offset" in the /dev/lguest file to indicate what - * CPU number we're dealing with. Currently this is always 0, since we only + * CPU number we're dealing with. Currently this is always 0 since we only * support uniprocessor Guests, but you can see the beginnings of SMP support - * here. */ + * here. + */ static ssize_t write(struct file *file, const char __user *in, size_t size, loff_t *off) { - /* Once the Guest is initialized, we hold the "struct lguest" in the - * file private data. */ + /* + * Once the Guest is initialized, we hold the "struct lguest" in the + * file private data. + */ struct lguest *lg = file->private_data; const unsigned long __user *input = (const unsigned long __user *)in; unsigned long req; @@ -322,13 +435,15 @@ static ssize_t write(struct file *file, const char __user *in, } } -/*L:060 The final piece of interface code is the close() routine. It reverses +/*L:060 + * The final piece of interface code is the close() routine. It reverses * everything done in initialize(). This is usually called because the * Launcher exited. * * Note that the close routine returns 0 or a negative error number: it can't * really fail, but it can whine. I blame Sun for this wart, and K&R C for - * letting them do it. :*/ + * letting them do it. +:*/ static int close(struct inode *inode, struct file *file) { struct lguest *lg = file->private_data; @@ -338,8 +453,10 @@ static int close(struct inode *inode, struct file *file) if (!lg) return 0; - /* We need the big lock, to protect from inter-guest I/O and other - * Launchers initializing guests. */ + /* + * We need the big lock, to protect from inter-guest I/O and other + * Launchers initializing guests. + */ mutex_lock(&lguest_lock); /* Free up the shadow page tables for the Guest. */ @@ -350,18 +467,22 @@ static int close(struct inode *inode, struct file *file) hrtimer_cancel(&lg->cpus[i].hrt); /* We can free up the register page we allocated. */ free_page(lg->cpus[i].regs_page); - /* Now all the memory cleanups are done, it's safe to release - * the Launcher's memory management structure. */ + /* + * Now all the memory cleanups are done, it's safe to release + * the Launcher's memory management structure. + */ mmput(lg->cpus[i].mm); } /* Release any eventfds they registered. */ for (i = 0; i < lg->eventfds->num; i++) - fput(lg->eventfds->map[i].event); + eventfd_ctx_put(lg->eventfds->map[i].event); kfree(lg->eventfds); - /* If lg->dead doesn't contain an error code it will be NULL or a - * kmalloc()ed string, either of which is ok to hand to kfree(). */ + /* + * If lg->dead doesn't contain an error code it will be NULL or a + * kmalloc()ed string, either of which is ok to hand to kfree(). 
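/*
 * How a Launcher drives the interface described above, in outline.  This
 * is an illustrative sketch: it assumes the LHREQ_* request codes from
 * <linux/lguest_launcher.h> and deliberately glosses over what goes in,
 * and comes back from, the buffers.
 */
#include <fcntl.h>
#include <unistd.h>
#include <linux/lguest_launcher.h>

static int lguest_open(void)
{
	return open("/dev/lguest", O_RDWR);
}

/* Every write starts with an unsigned long request code, then arguments. */
static int lguest_write(int fd, const unsigned long *args, unsigned int n)
{
	size_t len = n * sizeof(args[0]);

	return write(fd, args, len) == (ssize_t)len ? 0 : -1;
}

/* Inject interrupt "irq" into the (only) virtual CPU. */
static int lguest_irq(int fd, unsigned long irq)
{
	unsigned long args[] = { LHREQ_IRQ, irq };

	return lguest_write(fd, args, 2);
}

/* Run the Guest until it stops for some reason; what lands in "buf"
 * depends on why it stopped (see read() above). */
static long lguest_run(int fd, unsigned long *buf)
{
	return read(fd, buf, sizeof(*buf));
}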
+ */ if (!IS_ERR(lg->dead)) kfree(lg->dead); /* Free the memory allocated to the lguest_struct */ @@ -385,7 +506,8 @@ static int close(struct inode *inode, struct file *file) * * We begin our understanding with the Host kernel interface which the Launcher * uses: reading and writing a character device called /dev/lguest. All the - * work happens in the read(), write() and close() routines: */ + * work happens in the read(), write() and close() routines: + */ static struct file_operations lguest_fops = { .owner = THIS_MODULE, .release = close, @@ -393,8 +515,10 @@ static struct file_operations lguest_fops = { .read = read, }; -/* This is a textbook example of a "misc" character device. Populate a "struct - * miscdevice" and register it with misc_register(). */ +/* + * This is a textbook example of a "misc" character device. Populate a "struct + * miscdevice" and register it with misc_register(). + */ static struct miscdevice lguest_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "lguest", diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a6fe1abda24..a8d0aee3bc0 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -1,9 +1,11 @@ -/*P:700 The pagetable code, on the other hand, still shows the scars of +/*P:700 + * The pagetable code, on the other hand, still shows the scars of * previous encounters. It's functional, and as neat as it can be in the * circumstances, but be wary, for these things are subtle and break easily. * The Guest provides a virtual to physical mapping, but we can neither trust * it nor use it: we verify and convert it here then point the CPU to the - * converted Guest pages when running the Guest. :*/ + * converted Guest pages when running the Guest. +:*/ /* Copyright (C) Rusty Russell IBM Corporation 2006. * GPL v2 and any later version */ @@ -17,18 +19,20 @@ #include <asm/bootparam.h> #include "lg.h" -/*M:008 We hold reference to pages, which prevents them from being swapped. +/*M:008 + * We hold reference to pages, which prevents them from being swapped. * It'd be nice to have a callback in the "struct mm_struct" when Linux wants * to swap out. If we had this, and a shrinker callback to trim PTE pages, we - * could probably consider launching Guests as non-root. :*/ + * could probably consider launching Guests as non-root. +:*/ /*H:300 * The Page Table Code * - * We use two-level page tables for the Guest. If you're not entirely - * comfortable with virtual addresses, physical addresses and page tables then - * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with - * diagrams!). + * We use two-level page tables for the Guest, or three-level with PAE. If + * you're not entirely comfortable with virtual addresses, physical addresses + * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page + * Table Handling" (with diagrams!). * * The Guest keeps page tables, but we maintain the actual ones here: these are * called "shadow" page tables. Which is a very Guest-centric name: these are @@ -45,16 +49,18 @@ * (v) Flushing (throwing away) page tables, * (vi) Mapping the Switcher when the Guest is about to run, * (vii) Setting up the page tables initially. - :*/ +:*/ - -/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is - * conveniently placed at the top 4MB, so it uses a separate, complete PTE - * page. */ +/* + * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB) + * or 512 PTE entries with PAE (2MB). 
+ */ #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) -/* For PAE we need the PMD index as well. We use the last 2MB, so we - * will need the last pmd entry of the last pmd page. */ +/* + * For PAE we need the PMD index as well. We use the last 2MB, so we + * will need the last pmd entry of the last pmd page. + */ #ifdef CONFIG_X86_PAE #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) #define RESERVE_MEM 2U @@ -64,14 +70,18 @@ #define CHECK_GPGD_MASK _PAGE_TABLE #endif -/* We actually need a separate PTE page for each CPU. Remember that after the +/* + * We actually need a separate PTE page for each CPU. Remember that after the * Switcher code itself comes two pages for each CPU, and we don't want this - * CPU's guest to see the pages of any other CPU. */ + * CPU's guest to see the pages of any other CPU. + */ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) -/*H:320 The page table code is curly enough to need helper functions to keep it - * clear and clean. +/*H:320 + * The page table code is curly enough to need helper functions to keep it + * clear and clean. The kernel itself provides many of them; one advantage + * of insisting that the Guest and Host use the same CONFIG_PAE setting. * * There are two functions which return pointers to the shadow (aka "real") * page tables. @@ -79,7 +89,8 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); * spgd_addr() takes the virtual address and returns a pointer to the top-level * page directory entry (PGD) for that address. Since we keep track of several * page tables, the "i" argument tells us which one we're interested in (it's - * usually the current one). */ + * usually the current one). + */ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) { unsigned int index = pgd_index(vaddr); @@ -96,9 +107,11 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) } #ifdef CONFIG_X86_PAE -/* This routine then takes the PGD entry given above, which contains the +/* + * This routine then takes the PGD entry given above, which contains the * address of the PMD page. It then returns a pointer to the PMD entry for the - * given address. */ + * given address. + */ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) { unsigned int index = pmd_index(vaddr); @@ -119,9 +132,11 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) } #endif -/* This routine then takes the page directory entry returned above, which +/* + * This routine then takes the page directory entry returned above, which * contains the address of the page table entry (PTE) page. It then returns a - * pointer to the PTE entry for the given address. */ + * pointer to the PTE entry for the given address. + */ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) { #ifdef CONFIG_X86_PAE @@ -139,8 +154,10 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) return &page[pte_index(vaddr)]; } -/* These two functions just like the above two, except they access the Guest - * page tables. Hence they return a Guest address. */ +/* + * These functions are just like the above two, except they access the Guest + * page tables. Hence they return a Guest address. 
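/*
 * The arithmetic behind the helpers above, pulled out into a standalone
 * sketch.  These are the architectural non-PAE x86 numbers; nothing here
 * is lguest-specific except the observation about the top slot.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22		/* each PGD entry covers 4MB */
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

static unsigned int pgd_index(unsigned long vaddr)
{
	return vaddr >> PGDIR_SHIFT;
}

static unsigned int pte_index(unsigned long vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	/* The last PGD slot covers the top 4MB of virtual address space,
	 * which is why the Switcher gets SWITCHER_PGD_INDEX = 1023. */
	unsigned long top = (unsigned long)(PTRS_PER_PGD - 1) << PGDIR_SHIFT;

	printf("top slot %u starts at %#lx\n", pgd_index(top), top);
	printf("0xc0100000 -> pgd %u, pte %u\n",
	       pgd_index(0xc0100000UL), pte_index(0xc0100000UL));
	return 0;
}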
+ */ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) { unsigned int index = vaddr >> (PGDIR_SHIFT); @@ -148,6 +165,7 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) } #ifdef CONFIG_X86_PAE +/* Follow the PGD to the PMD. */ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) { unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; @@ -155,6 +173,7 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) return gpage + pmd_index(vaddr) * sizeof(pmd_t); } +/* Follow the PMD to the PTE. */ static unsigned long gpte_addr(struct lg_cpu *cpu, pmd_t gpmd, unsigned long vaddr) { @@ -164,6 +183,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, return gpage + pte_index(vaddr) * sizeof(pte_t); } #else +/* Follow the PGD to the PTE (no mid-level for !PAE). */ static unsigned long gpte_addr(struct lg_cpu *cpu, pgd_t gpgd, unsigned long vaddr) { @@ -175,17 +195,21 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, #endif /*:*/ -/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as - * an optimization (ie. pre-faulting). :*/ +/*M:014 + * get_pfn is slow: we could probably try to grab batches of pages here as + * an optimization (ie. pre-faulting). +:*/ -/*H:350 This routine takes a page number given by the Guest and converts it to +/*H:350 + * This routine takes a page number given by the Guest and converts it to * an actual, physical page number. It can fail for several reasons: the * virtual address might not be mapped by the Launcher, the write flag is set * and the page is read-only, or the write flag was set and the page was * shared so had to be copied, but we ran out of memory. * * This holds a reference to the page, so release_pte() is careful to put that - * back. */ + * back. + */ static unsigned long get_pfn(unsigned long virtpfn, int write) { struct page *page; @@ -198,33 +222,41 @@ static unsigned long get_pfn(unsigned long virtpfn, int write) return -1UL; } -/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table +/*H:340 + * Converting a Guest page table entry to a shadow (ie. real) page table * entry can be a little tricky. The flags are (almost) the same, but the * Guest PTE contains a virtual page number: the CPU needs the real page - * number. */ + * number. + */ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) { unsigned long pfn, base, flags; - /* The Guest sets the global flag, because it thinks that it is using + /* + * The Guest sets the global flag, because it thinks that it is using * PGE. We only told it to use PGE so it would tell us whether it was * flushing a kernel mapping or a userspace mapping. We don't actually - * use the global bit, so throw it away. */ + * use the global bit, so throw it away. + */ flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); /* The Guest's pages are offset inside the Launcher. */ base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; - /* We need a temporary "unsigned long" variable to hold the answer from + /* + * We need a temporary "unsigned long" variable to hold the answer from * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't * fit in spte.pfn. get_pfn() finds the real physical number of the - * page, given the virtual number. */ + * page, given the virtual number. 
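/*
 * A hypothetical, non-PAE stand-in for gpgd_addr()/gpte_addr() above:
 * given the Guest's page-table base and a raw Guest PGD entry, where in
 * Guest memory do the interesting entries live?  Plain integers are used
 * instead of pgd_t/pte_t, and there is no struct lg_cpu.
 */
#include <stdint.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTE_MASK	(~0xFFFUL)	/* strip the low flag bits */

/* Guest-physical address of the PGD entry covering vaddr. */
static unsigned long toy_gpgd_addr(unsigned long gpgdir, unsigned long vaddr)
{
	return gpgdir + (vaddr >> PGDIR_SHIFT) * sizeof(uint32_t);
}

/* Guest-physical address of the PTE covering vaddr, given that PGD entry. */
static unsigned long toy_gpte_addr(unsigned long gpgd, unsigned long vaddr)
{
	unsigned long pte_page = gpgd & PTE_MASK;	  /* page full of PTEs */
	unsigned long idx = (vaddr >> PAGE_SHIFT) & 1023; /* pte_index(vaddr) */

	return pte_page + idx * sizeof(uint32_t);	  /* 4-byte PTEs, no PAE */
}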
+ */ pfn = get_pfn(base + pte_pfn(gpte), write); if (pfn == -1UL) { kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); - /* When we destroy the Guest, we'll go through the shadow page + /* + * When we destroy the Guest, we'll go through the shadow page * tables and release_pte() them. Make sure we don't think - * this one is valid! */ + * this one is valid! + */ flags = 0; } /* Now we assemble our shadow PTE from the page number and flags. */ @@ -234,8 +266,10 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) /*H:460 And to complete the chain, release_pte() looks like this: */ static void release_pte(pte_t pte) { - /* Remember that get_user_pages_fast() took a reference to the page, in - * get_pfn()? We have to put it back now. */ + /* + * Remember that get_user_pages_fast() took a reference to the page, in + * get_pfn()? We have to put it back now. + */ if (pte_flags(pte) & _PAGE_PRESENT) put_page(pte_page(pte)); } @@ -273,7 +307,8 @@ static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) * and return to the Guest without it knowing. * * If we fixed up the fault (ie. we mapped the address), this routine returns - * true. Otherwise, it was a real fault and we need to tell the Guest. */ + * true. Otherwise, it was a real fault and we need to tell the Guest. + */ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) { pgd_t gpgd; @@ -282,6 +317,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) pte_t gpte; pte_t *spte; + /* Mid level for PAE. */ #ifdef CONFIG_X86_PAE pmd_t *spmd; pmd_t gpmd; @@ -298,22 +334,26 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { /* No shadow entry: allocate a new shadow PTE page. */ unsigned long ptepage = get_zeroed_page(GFP_KERNEL); - /* This is not really the Guest's fault, but killing it is - * simple for this corner case. */ + /* + * This is not really the Guest's fault, but killing it is + * simple for this corner case. + */ if (!ptepage) { kill_guest(cpu, "out of memory allocating pte page"); return false; } /* We check that the Guest pgd is OK. */ check_gpgd(cpu, gpgd); - /* And we copy the flags to the shadow PGD entry. The page - * number in the shadow PGD is the page we just allocated. */ + /* + * And we copy the flags to the shadow PGD entry. The page + * number in the shadow PGD is the page we just allocated. + */ set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); } #ifdef CONFIG_X86_PAE gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); - /* middle level not present? We can't map it in. */ + /* Middle level not present? We can't map it in. */ if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) return false; @@ -324,8 +364,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) /* No shadow entry: allocate a new shadow PTE page. */ unsigned long ptepage = get_zeroed_page(GFP_KERNEL); - /* This is not really the Guest's fault, but killing it is - * simple for this corner case. */ + /* + * This is not really the Guest's fault, but killing it is + * simple for this corner case. + */ if (!ptepage) { kill_guest(cpu, "out of memory allocating pte page"); return false; @@ -334,27 +376,37 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) /* We check that the Guest pmd is OK. */ check_gpmd(cpu, gpmd); - /* And we copy the flags to the shadow PMD entry. The page - * number in the shadow PMD is the page we just allocated. */ + /* + * And we copy the flags to the shadow PMD entry. 
The page + * number in the shadow PMD is the page we just allocated. + */ native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); } - /* OK, now we look at the lower level in the Guest page table: keep its - * address, because we might update it later. */ + /* + * OK, now we look at the lower level in the Guest page table: keep its + * address, because we might update it later. + */ gpte_ptr = gpte_addr(cpu, gpmd, vaddr); #else - /* OK, now we look at the lower level in the Guest page table: keep its - * address, because we might update it later. */ + /* + * OK, now we look at the lower level in the Guest page table: keep its + * address, because we might update it later. + */ gpte_ptr = gpte_addr(cpu, gpgd, vaddr); #endif + + /* Read the actual PTE value. */ gpte = lgread(cpu, gpte_ptr, pte_t); /* If this page isn't in the Guest page tables, we can't page it in. */ if (!(pte_flags(gpte) & _PAGE_PRESENT)) return false; - /* Check they're not trying to write to a page the Guest wants - * read-only (bit 2 of errcode == write). */ + /* + * Check they're not trying to write to a page the Guest wants + * read-only (bit 2 of errcode == write). + */ if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) return false; @@ -362,8 +414,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) return false; - /* Check that the Guest PTE flags are OK, and the page number is below - * the pfn_limit (ie. not mapping the Launcher binary). */ + /* + * Check that the Guest PTE flags are OK, and the page number is below + * the pfn_limit (ie. not mapping the Launcher binary). + */ check_gpte(cpu, gpte); /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ @@ -373,29 +427,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) /* Get the pointer to the shadow PTE entry we're going to set. */ spte = spte_addr(cpu, *spgd, vaddr); - /* If there was a valid shadow PTE entry here before, we release it. - * This can happen with a write to a previously read-only entry. */ + + /* + * If there was a valid shadow PTE entry here before, we release it. + * This can happen with a write to a previously read-only entry. + */ release_pte(*spte); - /* If this is a write, we insist that the Guest page is writable (the - * final arg to gpte_to_spte()). */ + /* + * If this is a write, we insist that the Guest page is writable (the + * final arg to gpte_to_spte()). + */ if (pte_dirty(gpte)) *spte = gpte_to_spte(cpu, gpte, 1); else - /* If this is a read, don't set the "writable" bit in the page + /* + * If this is a read, don't set the "writable" bit in the page * table entry, even if the Guest says it's writable. That way * we will come back here when a write does actually occur, so - * we can update the Guest's _PAGE_DIRTY flag. */ + * we can update the Guest's _PAGE_DIRTY flag. + */ native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); - /* Finally, we write the Guest PTE entry back: we've set the - * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ + /* + * Finally, we write the Guest PTE entry back: we've set the + * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. + */ lgwrite(cpu, gpte_ptr, pte_t, gpte); - /* The fault is fixed, the page table is populated, the mapping + /* + * The fault is fixed, the page table is populated, the mapping * manipulated, the result returned and the code complete. 
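/*
 * The decision demand_page() just made, reduced to a standalone sketch.
 * The flag and error-code values are the architectural x86 ones; the
 * function names are invented for illustration.
 */
#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_USER	0x004UL
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x040UL

#define PF_WRITE	2	/* bit 1 of the fault error code: a write */
#define PF_USER		4	/* bit 2: the access came from user mode */

/* Would the Guest's own page tables have allowed this access?  If not,
 * the fault is reflected back into the Guest instead of being fixed up. */
static int guest_allows(unsigned long gpte_flags, int errcode)
{
	if (!(gpte_flags & _PAGE_PRESENT))
		return 0;
	if ((errcode & PF_WRITE) && !(gpte_flags & _PAGE_RW))
		return 0;
	if ((errcode & PF_USER) && !(gpte_flags & _PAGE_USER))
		return 0;
	return 1;
}

/* And the shadow-side trick: only make the shadow entry writable once the
 * Guest entry is dirty, so the first real write comes back to us and the
 * Guest's _PAGE_DIRTY flag can be kept honest. */
static unsigned long shadow_flags(unsigned long gpte_flags, int errcode)
{
	gpte_flags |= _PAGE_ACCESSED;
	if (errcode & PF_WRITE)
		gpte_flags |= _PAGE_DIRTY;
	if (!(gpte_flags & _PAGE_DIRTY))
		gpte_flags &= ~_PAGE_RW;	/* like pte_wrprotect(gpte) */
	return gpte_flags;
}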
A small * delay and a trace of alliteration are the only indications the Guest - * has that a page fault occurred at all. */ + * has that a page fault occurred at all. + */ return true; } @@ -408,7 +473,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) * mapped, so it's overkill. * * This is a quick version which answers the question: is this virtual address - * mapped by the shadow page tables, and is it writable? */ + * mapped by the shadow page tables, and is it writable? + */ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) { pgd_t *spgd; @@ -428,21 +494,26 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) return false; #endif - /* Check the flags on the pte entry itself: it must be present and - * writable. */ + /* + * Check the flags on the pte entry itself: it must be present and + * writable. + */ flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); } -/* So, when pin_stack_pages() asks us to pin a page, we check if it's already +/* + * So, when pin_stack_pages() asks us to pin a page, we check if it's already * in the page tables, and if not, we call demand_page() with error code 2 - * (meaning "write"). */ + * (meaning "write"). + */ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) { if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) kill_guest(cpu, "bad stack page %#lx", vaddr); } +/*:*/ #ifdef CONFIG_X86_PAE static void release_pmd(pmd_t *spmd) @@ -479,15 +550,21 @@ static void release_pgd(pgd_t *spgd) } #else /* !CONFIG_X86_PAE */ -/*H:450 If we chase down the release_pgd() code, it looks like this: */ +/*H:450 + * If we chase down the release_pgd() code, the non-PAE version looks like + * this. The PAE version is almost identical, but instead of calling + * release_pte it calls release_pmd(), which looks much like this. + */ static void release_pgd(pgd_t *spgd) { /* If the entry's not present, there's nothing to release. */ if (pgd_flags(*spgd) & _PAGE_PRESENT) { unsigned int i; - /* Converting the pfn to find the actual PTE page is easy: turn + /* + * Converting the pfn to find the actual PTE page is easy: turn * the page number into a physical address, then convert to a - * virtual address (easy for kernel pages like this one). */ + * virtual address (easy for kernel pages like this one). + */ pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); /* For each entry in the page, we might need to release it. */ for (i = 0; i < PTRS_PER_PTE; i++) @@ -499,9 +576,12 @@ static void release_pgd(pgd_t *spgd) } } #endif -/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() + +/*H:445 + * We saw flush_user_mappings() twice: once from the flush_user_mappings() * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. - * It simply releases every PTE page from 0 up to the Guest's kernel address. */ + * It simply releases every PTE page from 0 up to the Guest's kernel address. + */ static void flush_user_mappings(struct lguest *lg, int idx) { unsigned int i; @@ -510,10 +590,12 @@ static void flush_user_mappings(struct lguest *lg, int idx) release_pgd(lg->pgdirs[idx].pgdir + i); } -/*H:440 (v) Flushing (throwing away) page tables, +/*H:440 + * (v) Flushing (throwing away) page tables, * * The Guest has a hypercall to throw away the page tables: it's used when a - * large number of mappings have been changed. */ + * large number of mappings have been changed. 
+ */ void guest_pagetable_flush_user(struct lg_cpu *cpu) { /* Drop the userspace part of the current page table. */ @@ -551,9 +633,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); } -/* We keep several page tables. This is a simple routine to find the page +/* + * We keep several page tables. This is a simple routine to find the page * table (if any) corresponding to this top-level address the Guest has given - * us. */ + * us. + */ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) { unsigned int i; @@ -563,9 +647,11 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) return i; } -/*H:435 And this is us, creating the new page directory. If we really do +/*H:435 + * And this is us, creating the new page directory. If we really do * allocate a new one (and so the kernel parts are not there), we set - * blank_pgdir. */ + * blank_pgdir. + */ static unsigned int new_pgdir(struct lg_cpu *cpu, unsigned long gpgdir, int *blank_pgdir) @@ -575,8 +661,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, pmd_t *pmd_table; #endif - /* We pick one entry at random to throw out. Choosing the Least - * Recently Used might be better, but this is easy. */ + /* + * We pick one entry at random to throw out. Choosing the Least + * Recently Used might be better, but this is easy. + */ next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); /* If it's never been allocated at all before, try now. */ if (!cpu->lg->pgdirs[next].pgdir) { @@ -587,8 +675,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, next = cpu->cpu_pgd; else { #ifdef CONFIG_X86_PAE - /* In PAE mode, allocate a pmd page and populate the - * last pgd entry. */ + /* + * In PAE mode, allocate a pmd page and populate the + * last pgd entry. + */ pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd_table) { free_page((long)cpu->lg->pgdirs[next].pgdir); @@ -598,8 +688,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, set_pgd(cpu->lg->pgdirs[next].pgdir + SWITCHER_PGD_INDEX, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - /* This is a blank page, so there are no kernel - * mappings: caller must map the stack! */ + /* + * This is a blank page, so there are no kernel + * mappings: caller must map the stack! + */ *blank_pgdir = 1; } #else @@ -615,19 +707,23 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, return next; } -/*H:430 (iv) Switching page tables +/*H:430 + * (iv) Switching page tables * * Now we've seen all the page table setting and manipulation, let's see * what happens when the Guest changes page tables (ie. changes the top-level - * pgdir). This occurs on almost every context switch. */ + * pgdir). This occurs on almost every context switch. + */ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) { int newpgdir, repin = 0; /* Look to see if we have this one already. */ newpgdir = find_pgdir(cpu->lg, pgtable); - /* If not, we allocate or mug an existing one: if it's a fresh one, - * repin gets set to 1. */ + /* + * If not, we allocate or mug an existing one: if it's a fresh one, + * repin gets set to 1. + */ if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) newpgdir = new_pgdir(cpu, pgtable, &repin); /* Change the current pgd index to the new one. 
*/ @@ -637,9 +733,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) pin_stack_pages(cpu); } -/*H:470 Finally, a routine which throws away everything: all PGD entries in all +/*H:470 + * Finally, a routine which throws away everything: all PGD entries in all * the shadow page tables, including the Guest's kernel mappings. This is used - * when we destroy the Guest. */ + * when we destroy the Guest. + */ static void release_all_pagetables(struct lguest *lg) { unsigned int i, j; @@ -656,8 +754,10 @@ static void release_all_pagetables(struct lguest *lg) spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); - /* And release the pmd entries of that pmd page, - * except for the switcher pmd. */ + /* + * And release the pmd entries of that pmd page, + * except for the switcher pmd. + */ for (k = 0; k < SWITCHER_PMD_INDEX; k++) release_pmd(&pmdpage[k]); #endif @@ -667,10 +767,12 @@ static void release_all_pagetables(struct lguest *lg) } } -/* We also throw away everything when a Guest tells us it's changed a kernel +/* + * We also throw away everything when a Guest tells us it's changed a kernel * mapping. Since kernel mappings are in every page table, it's easiest to * throw them all away. This traps the Guest in amber for a while as - * everything faults back in, but it's rare. */ + * everything faults back in, but it's rare. + */ void guest_pagetable_clear_all(struct lg_cpu *cpu) { release_all_pagetables(cpu->lg); @@ -678,15 +780,19 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu) pin_stack_pages(cpu); } /*:*/ -/*M:009 Since we throw away all mappings when a kernel mapping changes, our + +/*M:009 + * Since we throw away all mappings when a kernel mapping changes, our * performance sucks for guests using highmem. In fact, a guest with * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is * usually slower than a Guest with less memory. * * This, of course, cannot be fixed. It would take some kind of... well, I - * don't know, but the term "puissant code-fu" comes to mind. :*/ + * don't know, but the term "puissant code-fu" comes to mind. +:*/ -/*H:420 This is the routine which actually sets the page table entry for then +/*H:420 + * This is the routine which actually sets the page table entry for then * "idx"'th shadow page table. * * Normally, we can just throw out the old entry and replace it with 0: if they @@ -715,31 +821,36 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, spmd = spmd_addr(cpu, *spgd, vaddr); if (pmd_flags(*spmd) & _PAGE_PRESENT) { #endif - /* Otherwise, we start by releasing - * the existing entry. */ + /* Otherwise, start by releasing the existing entry. */ pte_t *spte = spte_addr(cpu, *spgd, vaddr); release_pte(*spte); - /* If they're setting this entry as dirty or accessed, - * we might as well put that entry they've given us - * in now. This shaves 10% off a - * copy-on-write micro-benchmark. */ + /* + * If they're setting this entry as dirty or accessed, + * we might as well put that entry they've given us in + * now. This shaves 10% off a copy-on-write + * micro-benchmark. + */ if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { check_gpte(cpu, gpte); native_set_pte(spte, gpte_to_spte(cpu, gpte, pte_flags(gpte) & _PAGE_DIRTY)); - } else - /* Otherwise kill it and we can demand_page() - * it in later. */ + } else { + /* + * Otherwise kill it and we can demand_page() + * it in later. 
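/*
 * A toy version of the page-table cache that find_pgdir() and new_pgdir()
 * above implement: look the Guest's top-level address up in a small fixed
 * array, and throw out a random victim on a miss.  Sizes and names here
 * are illustrative, not the real struct lguest layout.
 */
#include <stdlib.h>

#define TOY_NPGDIRS 4

static unsigned long toy_pgdirs[TOY_NPGDIRS];	/* 0 means "unused slot" */

static unsigned int toy_find_pgdir(unsigned long gpgdir)
{
	unsigned int i;

	for (i = 0; i < TOY_NPGDIRS; i++)
		if (toy_pgdirs[i] == gpgdir)
			break;
	return i;			/* == TOY_NPGDIRS means "not cached" */
}

static unsigned int toy_switch_pgdir(unsigned long gpgdir)
{
	unsigned int idx = toy_find_pgdir(gpgdir);

	if (idx == TOY_NPGDIRS) {
		/* Miss: evict a random entry, as new_pgdir() does.  The
		 * real code would also release the victim's shadow pages
		 * and re-pin the Guest's stack afterwards. */
		idx = (unsigned int)(rand() % TOY_NPGDIRS);
		toy_pgdirs[idx] = gpgdir;
	}
	return idx;
}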
+ */ native_set_pte(spte, __pte(0)); + } #ifdef CONFIG_X86_PAE } #endif } } -/*H:410 Updating a PTE entry is a little trickier. +/*H:410 + * Updating a PTE entry is a little trickier. * * We keep track of several different page tables (the Guest uses one for each * process, so it makes sense to cache at least a few). Each of these have @@ -748,12 +859,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, * all the page tables, not just the current one. This is rare. * * The benefit is that when we have to track a new page table, we can keep all - * the kernel mappings. This speeds up context switch immensely. */ + * the kernel mappings. This speeds up context switch immensely. + */ void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, unsigned long vaddr, pte_t gpte) { - /* Kernel mappings must be changed on all top levels. Slow, but doesn't - * happen often. */ + /* + * Kernel mappings must be changed on all top levels. Slow, but doesn't + * happen often. + */ if (vaddr >= cpu->lg->kernel_address) { unsigned int i; for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) @@ -795,19 +909,25 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) /* ... throw it away. */ release_pgd(lg->pgdirs[pgdir].pgdir + idx); } + #ifdef CONFIG_X86_PAE +/* For setting a mid-level, we just throw everything away. It's easy. */ void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) { guest_pagetable_clear_all(&lg->cpus[0]); } #endif -/* Once we know how much memory we have we can construct simple identity - * (which set virtual == physical) and linear mappings - * which will get the Guest far enough into the boot to create its own. +/*H:505 + * To get through boot, we construct simple identity page mappings (which + * set virtual == physical) and linear mappings which will get the Guest far + * enough into the boot to create its own. The linear mapping means we + * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET, + * as you'll see. * * We lay them out of the way, just below the initrd (which is why we need to - * know its size here). */ + * know its size here). + */ static unsigned long setup_pagetables(struct lguest *lg, unsigned long mem, unsigned long initrd_size) @@ -825,8 +945,10 @@ static unsigned long setup_pagetables(struct lguest *lg, unsigned int phys_linear; #endif - /* We have mapped_pages frames to map, so we need - * linear_pages page tables to map them. */ + /* + * We have mapped_pages frames to map, so we need linear_pages page + * tables to map them. + */ mapped_pages = mem / PAGE_SIZE; linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; @@ -837,10 +959,16 @@ static unsigned long setup_pagetables(struct lguest *lg, linear = (void *)pgdir - linear_pages * PAGE_SIZE; #ifdef CONFIG_X86_PAE + /* + * And the single mid page goes below that. We only use one, but + * that's enough to map 1G, which definitely gets us through boot. + */ pmds = (void *)linear - PAGE_SIZE; #endif - /* Linear mapping is easy: put every page's address into the - * mapping in order. */ + /* + * Linear mapping is easy: put every page's address into the + * mapping in order. + */ for (i = 0; i < mapped_pages; i++) { pte_t pte; pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); @@ -848,11 +976,14 @@ static unsigned long setup_pagetables(struct lguest *lg, return -EFAULT; } - /* The top level points to the linear page table pages above. - * We setup the identity and linear mappings here. 
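/*
 * The sizing sum setup_pagetables() is doing, as a standalone example.
 * The memory and initrd sizes are made-up inputs, and the layout (the
 * top-level page just below the initrd, the PTE pages below that) is a
 * simplification of the code above.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	1024UL

int main(void)
{
	unsigned long mem = 128UL << 20;	/* a 128MB Guest, say */
	unsigned long initrd_size = 4UL << 20;	/* with a 4MB initrd */

	unsigned long mapped_pages = mem / PAGE_SIZE;
	unsigned long linear_pages =
		(mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	unsigned long pgdir = mem - initrd_size - PAGE_SIZE;
	unsigned long linear = pgdir - linear_pages * PAGE_SIZE;

	printf("%lu pages to map, %lu PTE pages needed\n",
	       mapped_pages, linear_pages);
	printf("PGD page at %#lx, linear PTE pages from %#lx\n",
	       pgdir, linear);
	return 0;
}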
*/ #ifdef CONFIG_X86_PAE + /* + * Make the Guest PMD entries point to the corresponding place in the + * linear mapping (up to one page worth of PMD). + */ for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; i += PTRS_PER_PTE, j++) { + /* FIXME: native_set_pmd is overkill here. */ native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); @@ -860,18 +991,36 @@ static unsigned long setup_pagetables(struct lguest *lg, return -EFAULT; } + /* One PGD entry, pointing to that PMD page. */ set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); + /* Copy it in as the first PGD entry (ie. addresses 0-1G). */ if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) return -EFAULT; + /* + * And the third PGD entry (ie. addresses 3G-4G). + * + * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000. + */ if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) return -EFAULT; #else + /* + * The top level points to the linear page table pages above. + * We setup the identity and linear mappings here. + */ phys_linear = (unsigned long)linear - mem_base; for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { pgd_t pgd; + /* + * Create a PGD entry which points to the right part of the + * linear PTE pages. + */ pgd = __pgd((phys_linear + i * sizeof(pte_t)) | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); + /* + * Copy it into the PGD page at 0 and PAGE_OFFSET. + */ if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) + i / PTRS_PER_PTE], @@ -880,15 +1029,19 @@ static unsigned long setup_pagetables(struct lguest *lg, } #endif - /* We return the top level (guest-physical) address: remember where - * this is. */ + /* + * We return the top level (guest-physical) address: we remember where + * this is to write it into lguest_data when the Guest initializes. + */ return (unsigned long)pgdir - mem_base; } -/*H:500 (vii) Setting up the page tables initially. +/*H:500 + * (vii) Setting up the page tables initially. * * When a Guest is first created, the Launcher tells us where the toplevel of - * its first page table is. We set some things up here: */ + * its first page table is. We set some things up here: + */ int init_guest_pagetable(struct lguest *lg) { u64 mem; @@ -898,21 +1051,27 @@ int init_guest_pagetable(struct lguest *lg) pgd_t *pgd; pmd_t *pmd_table; #endif - /* Get the Guest memory size and the ramdisk size from the boot header - * located at lg->mem_base (Guest address 0). */ + /* + * Get the Guest memory size and the ramdisk size from the boot header + * located at lg->mem_base (Guest address 0). + */ if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) || get_user(initrd_size, &boot->hdr.ramdisk_size)) return -EFAULT; - /* We start on the first shadow page table, and give it a blank PGD - * page. */ + /* + * We start on the first shadow page table, and give it a blank PGD + * page. + */ lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) return lg->pgdirs[0].gpgdir; lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); if (!lg->pgdirs[0].pgdir) return -ENOMEM; + #ifdef CONFIG_X86_PAE + /* For PAE, we also create the initial mid-level. */ pgd = lg->pgdirs[0].pgdir; pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); if (!pmd_table) @@ -921,27 +1080,33 @@ int init_guest_pagetable(struct lguest *lg) set_pgd(pgd + SWITCHER_PGD_INDEX, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); #endif + + /* This is the current page table. 
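/*
 * Why the PAE code above copies the PGD entry into slots 0 and 3: with
 * PAE the top level has only four entries, each covering 1GB, so the slot
 * for a virtual address is simply vaddr >> 30.  That is 3 only when
 * PAGE_OFFSET is 0xC0000000 -- exactly the assumption the FIXME points at.
 */
#include <stdio.h>

int main(void)
{
	unsigned long identity = 0x00000000UL;
	unsigned long page_offset = 0xC0000000UL;

	printf("identity map -> PAE PGD slot %lu\n", identity >> 30);
	printf("PAGE_OFFSET %#lx -> PAE PGD slot %lu\n",
	       page_offset, page_offset >> 30);
	return 0;
}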
*/ lg->cpus[0].cpu_pgd = 0; return 0; } -/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ +/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ void page_table_guest_data_init(struct lg_cpu *cpu) { /* We get the kernel address: above this is all kernel memory. */ if (get_user(cpu->lg->kernel_address, &cpu->lg->lguest_data->kernel_address) - /* We tell the Guest that it can't use the top 2 or 4 MB - * of virtual addresses used by the Switcher. */ + /* + * We tell the Guest that it can't use the top 2 or 4 MB + * of virtual addresses used by the Switcher. + */ || put_user(RESERVE_MEM * 1024 * 1024, &cpu->lg->lguest_data->reserve_mem) || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir)) kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); - /* In flush_user_mappings() we loop from 0 to + /* + * In flush_user_mappings() we loop from 0 to * "pgd_index(lg->kernel_address)". This assumes it won't hit the - * Switcher mappings, so check that now. */ + * Switcher mappings, so check that now. + */ #ifdef CONFIG_X86_PAE if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) @@ -964,12 +1129,14 @@ void free_guest_pagetable(struct lguest *lg) free_page((long)lg->pgdirs[i].pgdir); } -/*H:480 (vi) Mapping the Switcher when the Guest is about to run. +/*H:480 + * (vi) Mapping the Switcher when the Guest is about to run. * * The Switcher and the two pages for this CPU need to be visible in the * Guest (and not the pages for other CPUs). We have the appropriate PTE pages * for each CPU already set up, we just need to hook them in now we know which - * Guest is about to run on this CPU. */ + * Guest is about to run on this CPU. + */ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) { pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); @@ -980,30 +1147,38 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) pmd_t switcher_pmd; pmd_t *pmd_table; + /* FIXME: native_set_pmd is overkill here. */ native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); + /* Figure out where the pmd page is, by reading the PGD, and converting + * it to a virtual address. */ pmd_table = __va(pgd_pfn(cpu->lg-> pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) << PAGE_SHIFT); + /* Now write it into the shadow page table. */ native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); #else pgd_t switcher_pgd; - /* Make the last PGD entry for this Guest point to the Switcher's PTE - * page for this CPU (with appropriate flags). */ + /* + * Make the last PGD entry for this Guest point to the Switcher's PTE + * page for this CPU (with appropriate flags). + */ switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; #endif - /* We also change the Switcher PTE page. When we're running the Guest, + /* + * We also change the Switcher PTE page. When we're running the Guest, * we want the Guest's "regs" page to appear where the first Switcher * page for this CPU is. This is an optimization: when the Switcher * saves the Guest registers, it saves them into the first page of this * CPU's "struct lguest_pages": if we make sure the Guest's register * page is already mapped there, we don't have to copy them out - * again. */ + * again. 
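/*
 * The sanity check page_table_guest_data_init() makes, shown in isolation
 * for the non-PAE case only: the Guest's kernel_address must not land in
 * the top PGD slot, because that slot belongs to the Switcher and
 * flush_user_mappings() assumes it never has to touch it.
 */
#include <stdio.h>

#define PGDIR_SHIFT		22
#define PTRS_PER_PGD		1024
#define SWITCHER_PGD_INDEX	(PTRS_PER_PGD - 1)

static int kernel_address_ok(unsigned long kernel_address)
{
	return (kernel_address >> PGDIR_SHIFT) < SWITCHER_PGD_INDEX;
}

int main(void)
{
	printf("0xc0000000: %s\n",
	       kernel_address_ok(0xC0000000UL) ? "fine" : "bad guest page");
	printf("0xffc00000: %s\n",
	       kernel_address_ok(0xFFC00000UL) ? "fine" : "bad guest page");
	return 0;
}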
+ */ pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], @@ -1019,10 +1194,12 @@ static void free_switcher_pte_pages(void) free_page((long)switcher_pte_page(i)); } -/*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given +/*H:520 + * Setting up the Switcher PTE page for given CPU is fairly easy, given * the CPU number and the "struct page"s for the Switcher code itself. * - * Currently the Switcher is less than a page long, so "pages" is always 1. */ + * Currently the Switcher is less than a page long, so "pages" is always 1. + */ static __init void populate_switcher_pte_page(unsigned int cpu, struct page *switcher_page[], unsigned int pages) @@ -1043,13 +1220,16 @@ static __init void populate_switcher_pte_page(unsigned int cpu, native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); - /* The second page contains the "struct lguest_ro_state", and is - * read-only. */ + /* + * The second page contains the "struct lguest_ro_state", and is + * read-only. + */ native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); } -/* We've made it through the page table code. Perhaps our tired brains are +/* + * We've made it through the page table code. Perhaps our tired brains are * still processing the details, or perhaps we're simply glad it's over. * * If nothing else, note that all this complexity in juggling shadow page tables @@ -1058,10 +1238,13 @@ static __init void populate_switcher_pte_page(unsigned int cpu, * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD * have implemented shadow page table support directly into hardware. * - * There is just one file remaining in the Host. */ + * There is just one file remaining in the Host. + */ -/*H:510 At boot or module load time, init_pagetables() allocates and populates - * the Switcher PTE page for each CPU. */ +/*H:510 + * At boot or module load time, init_pagetables() allocates and populates + * the Switcher PTE page for each CPU. + */ __init int init_pagetables(struct page **switcher_page, unsigned int pages) { unsigned int i; diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 482ed5a1875..951c57b0a7e 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c @@ -1,4 +1,5 @@ -/*P:600 The x86 architecture has segments, which involve a table of descriptors +/*P:600 + * The x86 architecture has segments, which involve a table of descriptors * which can be used to do funky things with virtual address interpretation. * We originally used to use segments so the Guest couldn't alter the * Guest<->Host Switcher, and then we had to trim Guest segments, and restore @@ -8,7 +9,8 @@ * * In these modern times, the segment handling code consists of simple sanity * checks, and the worst you'll experience reading this code is butterfly-rash - * from frolicking through its parklike serenity. :*/ + * from frolicking through its parklike serenity. +:*/ #include "lg.h" /*H:600 @@ -41,10 +43,12 @@ * begin. */ -/* There are several entries we don't let the Guest set. The TSS entry is the +/* + * There are several entries we don't let the Guest set. The TSS entry is the * "Task State Segment" which controls all kinds of delicate things. The * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the - * the Guest can't be trusted to deal with double faults. 
*/ + * the Guest can't be trusted to deal with double faults. + */ static bool ignored_gdt(unsigned int num) { return (num == GDT_ENTRY_TSS @@ -53,42 +57,52 @@ static bool ignored_gdt(unsigned int num) || num == GDT_ENTRY_DOUBLEFAULT_TSS); } -/*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We +/*H:630 + * Once the Guest gave us new GDT entries, we fix them up a little. We * don't care if they're invalid: the worst that can happen is a General * Protection Fault in the Switcher when it restores a Guest segment register * which tries to use that entry. Then we kill the Guest for causing such a - * mess: the message will be "unhandled trap 256". */ + * mess: the message will be "unhandled trap 256". + */ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) { unsigned int i; for (i = start; i < end; i++) { - /* We never copy these ones to real GDT, so we don't care what - * they say */ + /* + * We never copy these ones to real GDT, so we don't care what + * they say + */ if (ignored_gdt(i)) continue; - /* Segment descriptors contain a privilege level: the Guest is + /* + * Segment descriptors contain a privilege level: the Guest is * sometimes careless and leaves this as 0, even though it's - * running at privilege level 1. If so, we fix it here. */ + * running at privilege level 1. If so, we fix it here. + */ if ((cpu->arch.gdt[i].b & 0x00006000) == 0) cpu->arch.gdt[i].b |= (GUEST_PL << 13); - /* Each descriptor has an "accessed" bit. If we don't set it + /* + * Each descriptor has an "accessed" bit. If we don't set it * now, the CPU will try to set it when the Guest first loads * that entry into a segment register. But the GDT isn't - * writable by the Guest, so bad things can happen. */ + * writable by the Guest, so bad things can happen. + */ cpu->arch.gdt[i].b |= 0x00000100; } } -/*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep +/*H:610 + * Like the IDT, we never simply use the GDT the Guest gives us. We keep * a GDT for each CPU, and copy across the Guest's entries each time we want to * run the Guest on that CPU. * * This routine is called at boot or modprobe time for each CPU to set up the * constant GDT entries: the ones which are the same no matter what Guest we're - * running. */ + * running. + */ void setup_default_gdt_entries(struct lguest_ro_state *state) { struct desc_struct *gdt = state->guest_gdt; @@ -98,30 +112,37 @@ void setup_default_gdt_entries(struct lguest_ro_state *state) gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; - /* The TSS segment refers to the TSS entry for this particular CPU. + /* + * The TSS segment refers to the TSS entry for this particular CPU. * Forgive the magic flags: the 0x8900 means the entry is Present, it's * privilege level 0 Available 386 TSS system segment, and the 0x67 - * means Saturn is eclipsed by Mercury in the twelfth house. */ + * means Saturn is eclipsed by Mercury in the twelfth house. + */ gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) | ((tss >> 16) & 0x000000FF); } -/* This routine sets up the initial Guest GDT for booting. All entries start - * as 0 (unusable). */ +/* + * This routine sets up the initial Guest GDT for booting. All entries start + * as 0 (unusable). + */ void setup_guest_gdt(struct lg_cpu *cpu) { - /* Start with full 0-4G segments... 
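/*
 * What fixup_gdt_table() above does to each descriptor, applied to a raw
 * value in a standalone sketch.  The bit positions are the architectural
 * x86 ones: in the high 32 bits of a descriptor, bits 13-14 hold the DPL
 * and bit 8 is the "accessed" type bit.  GUEST_PL is 1: the Guest runs at
 * privilege level 1.
 */
#include <stdio.h>
#include <stdint.h>

#define GUEST_PL	1
#define DESC_DPL_MASK	0x00006000u
#define DESC_ACCESSED	0x00000100u

static uint32_t fixup_descriptor_hi(uint32_t hi)
{
	/* Careless Guests leave the DPL at 0; bump it to ring 1. */
	if ((hi & DESC_DPL_MASK) == 0)
		hi |= (uint32_t)GUEST_PL << 13;

	/* Pre-set "accessed" so the CPU never tries to write the GDT,
	 * which the Guest can't write to. */
	hi |= DESC_ACCESSED;
	return hi;
}

int main(void)
{
	/* High word of a typical flat 4GB ring-0 code segment. */
	uint32_t hi = 0x00cf9a00u;

	printf("%#010x -> %#010x\n", hi, fixup_descriptor_hi(hi));
	return 0;
}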
*/ + /* + * Start with full 0-4G segments...except the Guest is allowed to use + * them, so set the privilege level appropriately in the flags. + */ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; - /* ...except the Guest is allowed to use them, so set the privilege - * level appropriately in the flags. */ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); } -/*H:650 An optimization of copy_gdt(), for just the three "thead-local storage" - * entries. */ +/*H:650 + * An optimization of copy_gdt(), for just the three "thead-local storage" + * entries. + */ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) { unsigned int i; @@ -130,26 +151,34 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) gdt[i] = cpu->arch.gdt[i]; } -/*H:640 When the Guest is run on a different CPU, or the GDT entries have - * changed, copy_gdt() is called to copy the Guest's GDT entries across to this - * CPU's GDT. */ +/*H:640 + * When the Guest is run on a different CPU, or the GDT entries have changed, + * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's + * GDT. + */ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) { unsigned int i; - /* The default entries from setup_default_gdt_entries() are not - * replaced. See ignored_gdt() above. */ + /* + * The default entries from setup_default_gdt_entries() are not + * replaced. See ignored_gdt() above. + */ for (i = 0; i < GDT_ENTRIES; i++) if (!ignored_gdt(i)) gdt[i] = cpu->arch.gdt[i]; } -/*H:620 This is where the Guest asks us to load a new GDT entry - * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ +/*H:620 + * This is where the Guest asks us to load a new GDT entry + * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. + */ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) { - /* We assume the Guest has the same number of GDT entries as the - * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ + /* + * We assume the Guest has the same number of GDT entries as the + * Host, otherwise we'd have to dynamically allocate the Guest GDT. + */ if (num >= ARRAY_SIZE(cpu->arch.gdt)) kill_guest(cpu, "too many gdt entries %i", num); @@ -157,15 +186,19 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) cpu->arch.gdt[num].a = lo; cpu->arch.gdt[num].b = hi; fixup_gdt_table(cpu, num, num+1); - /* Mark that the GDT changed so the core knows it has to copy it again, - * even if the Guest is run on the same CPU. */ + /* + * Mark that the GDT changed so the core knows it has to copy it again, + * even if the Guest is run on the same CPU. + */ cpu->changed |= CHANGED_GDT; } -/* This is the fast-track version for just changing the three TLS entries. +/* + * This is the fast-track version for just changing the three TLS entries. * Remember that this happens on every context switch, so it's worth * optimizing. But wouldn't it be neater to have a single hypercall to cover - * both cases? */ + * both cases? + */ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) { struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; @@ -175,7 +208,6 @@ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) /* Note that just the TLS entries have changed. */ cpu->changed |= CHANGED_GDT_TLS; } -/*:*/ /*H:660 * With this, we have finished the Host. 
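(Aside for readers following the GDT fix-ups in segments.c above: the short user-space sketch below is not part of this patch; it only mirrors the descriptor bit-twiddling that fixup_gdt_table() performs. In the high dword of a GDT entry, bits 13-14 hold the descriptor privilege level and bit 8 is the "accessed" bit; GUEST_PL is assumed to be 1, as in the lguest headers.)

/* Stand-alone illustration of the fixup_gdt_table() descriptor tweaks.
 * Hypothetical demo code, not from the kernel tree: it only reuses the
 * 0x00006000 (DPL) and 0x00000100 (accessed) masks seen in the hunks above.
 */
#include <stdio.h>
#include <stdint.h>

#define GUEST_PL 1			/* assumed, as in the lguest sources */

struct desc { uint32_t a, b; };		/* low/high dwords of one GDT entry */

static void fixup(struct desc *d)
{
	/* If the DPL field (bits 13-14 of 'b') is 0, raise it to GUEST_PL. */
	if ((d->b & 0x00006000) == 0)
		d->b |= (GUEST_PL << 13);
	/* Pre-set the "accessed" bit so the CPU never needs to write the GDT. */
	d->b |= 0x00000100;
}

int main(void)
{
	/* A flat 4GB DPL-0 code segment, as a Guest kernel might install. */
	struct desc d = { 0x0000ffff, 0x00cf9a00 };

	fixup(&d);
	printf("fixed descriptor: a=%#010x b=%#010x\n",
	       (unsigned)d.a, (unsigned)d.b);
	return 0;
}

Running it prints b=0x00cfbb00: the same base and limit, but now at privilege level 1 with the accessed bit pre-set, which is the effect the Host wants before copying Guest entries into the real GDT.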
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index eaf722fe309..6ae388849a3 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -17,13 +17,15 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -/*P:450 This file contains the x86-specific lguest code. It used to be all +/*P:450 + * This file contains the x86-specific lguest code. It used to be all * mixed in with drivers/lguest/core.c but several foolhardy code slashers * wrestled most of the dependencies out to here in preparation for porting * lguest to other architectures (see what I mean by foolhardy?). * * This also contains a couple of non-obvious setup and teardown pieces which - * were implemented after days of debugging pain. :*/ + * were implemented after days of debugging pain. +:*/ #include <linux/kernel.h> #include <linux/start_kernel.h> #include <linux/string.h> @@ -82,25 +84,33 @@ static DEFINE_PER_CPU(struct lg_cpu *, last_cpu); */ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) { - /* Copying all this data can be quite expensive. We usually run the + /* + * Copying all this data can be quite expensive. We usually run the * same Guest we ran last time (and that Guest hasn't run anywhere else * meanwhile). If that's not the case, we pretend everything in the - * Guest has changed. */ + * Guest has changed. + */ if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { __get_cpu_var(last_cpu) = cpu; cpu->last_pages = pages; cpu->changed = CHANGED_ALL; } - /* These copies are pretty cheap, so we do them unconditionally: */ - /* Save the current Host top-level page directory. */ + /* + * These copies are pretty cheap, so we do them unconditionally: */ + /* Save the current Host top-level page directory. + */ pages->state.host_cr3 = __pa(current->mm->pgd); - /* Set up the Guest's page tables to see this CPU's pages (and no - * other CPU's pages). */ + /* + * Set up the Guest's page tables to see this CPU's pages (and no + * other CPU's pages). + */ map_switcher_in_guest(cpu, pages); - /* Set up the two "TSS" members which tell the CPU what stack to use + /* + * Set up the two "TSS" members which tell the CPU what stack to use * for traps which do directly into the Guest (ie. traps at privilege - * level 1). */ + * level 1). + */ pages->state.guest_tss.sp1 = cpu->esp1; pages->state.guest_tss.ss1 = cpu->ss1; @@ -125,97 +135,126 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages) /* This is a dummy value we need for GCC's sake. */ unsigned int clobber; - /* Copy the guest-specific information into this CPU's "struct - * lguest_pages". */ + /* + * Copy the guest-specific information into this CPU's "struct + * lguest_pages". + */ copy_in_guest_info(cpu, pages); - /* Set the trap number to 256 (impossible value). If we fault while + /* + * Set the trap number to 256 (impossible value). If we fault while * switching to the Guest (bad segment registers or bug), this will - * cause us to abort the Guest. */ + * cause us to abort the Guest. + */ cpu->regs->trapnum = 256; - /* Now: we push the "eflags" register on the stack, then do an "lcall". + /* + * Now: we push the "eflags" register on the stack, then do an "lcall". * This is how we change from using the kernel code segment to using * the dedicated lguest code segment, as well as jumping into the * Switcher. 
* * The lcall also pushes the old code segment (KERNEL_CS) onto the * stack, then the address of this call. This stack layout happens to - * exactly match the stack layout created by an interrupt... */ + * exactly match the stack layout created by an interrupt... + */ asm volatile("pushf; lcall *lguest_entry" - /* This is how we tell GCC that %eax ("a") and %ebx ("b") - * are changed by this routine. The "=" means output. */ + /* + * This is how we tell GCC that %eax ("a") and %ebx ("b") + * are changed by this routine. The "=" means output. + */ : "=a"(clobber), "=b"(clobber) - /* %eax contains the pages pointer. ("0" refers to the + /* + * %eax contains the pages pointer. ("0" refers to the * 0-th argument above, ie "a"). %ebx contains the * physical address of the Guest's top-level page - * directory. */ + * directory. + */ : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) - /* We tell gcc that all these registers could change, + /* + * We tell gcc that all these registers could change, * which means we don't have to save and restore them in - * the Switcher. */ + * the Switcher. + */ : "memory", "%edx", "%ecx", "%edi", "%esi"); } /*:*/ -/*M:002 There are hooks in the scheduler which we can register to tell when we +/*M:002 + * There are hooks in the scheduler which we can register to tell when we * get kicked off the CPU (preempt_notifier_register()). This would allow us * to lazily disable SYSENTER which would regain some performance, and should * also simplify copy_in_guest_info(). Note that we'd still need to restore * things when we exit to Launcher userspace, but that's fairly easy. * - * We could also try using this hooks for PGE, but that might be too expensive. + * We could also try using these hooks for PGE, but that might be too expensive. * - * The hooks were designed for KVM, but we can also put them to good use. :*/ + * The hooks were designed for KVM, but we can also put them to good use. +:*/ -/*H:040 This is the i386-specific code to setup and run the Guest. Interrupts - * are disabled: we own the CPU. */ +/*H:040 + * This is the i386-specific code to setup and run the Guest. Interrupts + * are disabled: we own the CPU. + */ void lguest_arch_run_guest(struct lg_cpu *cpu) { - /* Remember the awfully-named TS bit? If the Guest has asked to set it + /* + * Remember the awfully-named TS bit? If the Guest has asked to set it * we set it now, so we can trap and pass that trap to the Guest if it - * uses the FPU. */ + * uses the FPU. + */ if (cpu->ts) unlazy_fpu(current); - /* SYSENTER is an optimized way of doing system calls. We can't allow + /* + * SYSENTER is an optimized way of doing system calls. We can't allow * it because it always jumps to privilege level 0. A normal Guest * won't try it because we don't advertise it in CPUID, but a malicious * Guest (or malicious Guest userspace program) could, so we tell the - * CPU to disable it before running the Guest. */ + * CPU to disable it before running the Guest. + */ if (boot_cpu_has(X86_FEATURE_SEP)) wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); - /* Now we actually run the Guest. It will return when something + /* + * Now we actually run the Guest. It will return when something * interesting happens, and we can examine its registers to see what it - * was doing. */ + * was doing. 
+ */ run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); - /* Note that the "regs" structure contains two extra entries which are + /* + * Note that the "regs" structure contains two extra entries which are * not really registers: a trap number which says what interrupt or * trap made the switcher code come back, and an error code which some - * traps set. */ + * traps set. + */ /* Restore SYSENTER if it's supposed to be on. */ if (boot_cpu_has(X86_FEATURE_SEP)) wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); - /* If the Guest page faulted, then the cr2 register will tell us the + /* + * If the Guest page faulted, then the cr2 register will tell us the * bad virtual address. We have to grab this now, because once we * re-enable interrupts an interrupt could fault and thus overwrite - * cr2, or we could even move off to a different CPU. */ + * cr2, or we could even move off to a different CPU. + */ if (cpu->regs->trapnum == 14) cpu->arch.last_pagefault = read_cr2(); - /* Similarly, if we took a trap because the Guest used the FPU, + /* + * Similarly, if we took a trap because the Guest used the FPU, * we have to restore the FPU it expects to see. * math_state_restore() may sleep and we may even move off to * a different CPU. So all the critical stuff should be done - * before this. */ + * before this. + */ else if (cpu->regs->trapnum == 7) math_state_restore(); } -/*H:130 Now we've examined the hypercall code; our Guest can make requests. +/*H:130 + * Now we've examined the hypercall code; our Guest can make requests. * Our Guest is usually so well behaved; it never tries to do things it isn't * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual * infrastructure isn't quite complete, because it doesn't contain replacements @@ -225,26 +264,33 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * * When the Guest uses one of these instructions, we get a trap (General * Protection Fault) and come here. We see if it's one of those troublesome - * instructions and skip over it. We return true if we did. */ + * instructions and skip over it. We return true if we did. + */ static int emulate_insn(struct lg_cpu *cpu) { u8 insn; unsigned int insnlen = 0, in = 0, shift = 0; - /* The eip contains the *virtual* address of the Guest's instruction: - * guest_pa just subtracts the Guest's page_offset. */ + /* + * The eip contains the *virtual* address of the Guest's instruction: + * guest_pa just subtracts the Guest's page_offset. + */ unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); - /* This must be the Guest kernel trying to do something, not userspace! + /* + * This must be the Guest kernel trying to do something, not userspace! * The bottom two bits of the CS segment register are the privilege - * level. */ + * level. + */ if ((cpu->regs->cs & 3) != GUEST_PL) return 0; /* Decoding x86 instructions is icky. */ insn = lgread(cpu, physaddr, u8); - /* 0x66 is an "operand prefix". It means it's using the upper 16 bits - of the eax register. */ + /* + * 0x66 is an "operand prefix". It means it's using the upper 16 bits + * of the eax register. + */ if (insn == 0x66) { shift = 16; /* The instruction is 1 byte so far, read the next byte. */ @@ -252,8 +298,10 @@ static int emulate_insn(struct lg_cpu *cpu) insn = lgread(cpu, physaddr + insnlen, u8); } - /* We can ignore the lower bit for the moment and decode the 4 opcodes - * we need to emulate. */ + /* + * We can ignore the lower bit for the moment and decode the 4 opcodes + * we need to emulate. 
+ */ switch (insn & 0xFE) { case 0xE4: /* in <next byte>,%al */ insnlen += 2; @@ -274,9 +322,11 @@ static int emulate_insn(struct lg_cpu *cpu) return 0; } - /* If it was an "IN" instruction, they expect the result to be read + /* + * If it was an "IN" instruction, they expect the result to be read * into %eax, so we change %eax. We always return all-ones, which - * traditionally means "there's nothing there". */ + * traditionally means "there's nothing there". + */ if (in) { /* Lower bit tells is whether it's a 16 or 32 bit access */ if (insn & 0x1) @@ -290,7 +340,8 @@ static int emulate_insn(struct lg_cpu *cpu) return 1; } -/* Our hypercalls mechanism used to be based on direct software interrupts. +/* + * Our hypercalls mechanism used to be based on direct software interrupts. * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to * change over to using kvm hypercalls. * @@ -318,16 +369,20 @@ static int emulate_insn(struct lg_cpu *cpu) */ static void rewrite_hypercall(struct lg_cpu *cpu) { - /* This are the opcodes we use to patch the Guest. The opcode for "int + /* + * This are the opcodes we use to patch the Guest. The opcode for "int * $0x1f" is "0xcd 0x1f" but vmcall instruction is 3 bytes long, so we - * complete the sequence with a NOP (0x90). */ + * complete the sequence with a NOP (0x90). + */ u8 insn[3] = {0xcd, 0x1f, 0x90}; __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); - /* The above write might have caused a copy of that page to be made + /* + * The above write might have caused a copy of that page to be made * (if it was read-only). We need to make sure the Guest has * up-to-date pagetables. As this doesn't happen often, we can just - * drop them all. */ + * drop them all. + */ guest_pagetable_clear_all(cpu); } @@ -335,9 +390,11 @@ static bool is_hypercall(struct lg_cpu *cpu) { u8 insn[3]; - /* This must be the Guest kernel trying to do something. + /* + * This must be the Guest kernel trying to do something. * The bottom two bits of the CS segment register are the privilege - * level. */ + * level. + */ if ((cpu->regs->cs & 3) != GUEST_PL) return false; @@ -351,86 +408,105 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) { switch (cpu->regs->trapnum) { case 13: /* We've intercepted a General Protection Fault. */ - /* Check if this was one of those annoying IN or OUT + /* + * Check if this was one of those annoying IN or OUT * instructions which we need to emulate. If so, we just go - * back into the Guest after we've done it. */ + * back into the Guest after we've done it. + */ if (cpu->regs->errcode == 0) { if (emulate_insn(cpu)) return; } - /* If KVM is active, the vmcall instruction triggers a - * General Protection Fault. Normally it triggers an - * invalid opcode fault (6): */ + /* + * If KVM is active, the vmcall instruction triggers a General + * Protection Fault. Normally it triggers an invalid opcode + * fault (6): + */ case 6: - /* We need to check if ring == GUEST_PL and - * faulting instruction == vmcall. */ + /* + * We need to check if ring == GUEST_PL and faulting + * instruction == vmcall. + */ if (is_hypercall(cpu)) { rewrite_hypercall(cpu); return; } break; case 14: /* We've intercepted a Page Fault. */ - /* The Guest accessed a virtual address that wasn't mapped. + /* + * The Guest accessed a virtual address that wasn't mapped. * This happens a lot: we don't actually set up most of the page * tables for the Guest at all when we start: as it runs it asks * for more and more, and we set them up as required. 
In this * case, we don't even tell the Guest that the fault happened. * * The errcode tells whether this was a read or a write, and - * whether kernel or userspace code. */ + * whether kernel or userspace code. + */ if (demand_page(cpu, cpu->arch.last_pagefault, cpu->regs->errcode)) return; - /* OK, it's really not there (or not OK): the Guest needs to + /* + * OK, it's really not there (or not OK): the Guest needs to * know. We write out the cr2 value so it knows where the * fault occurred. * * Note that if the Guest were really messed up, this could * happen before it's done the LHCALL_LGUEST_INIT hypercall, so - * lg->lguest_data could be NULL */ + * lg->lguest_data could be NULL + */ if (cpu->lg->lguest_data && put_user(cpu->arch.last_pagefault, &cpu->lg->lguest_data->cr2)) kill_guest(cpu, "Writing cr2"); break; case 7: /* We've intercepted a Device Not Available fault. */ - /* If the Guest doesn't want to know, we already restored the - * Floating Point Unit, so we just continue without telling - * it. */ + /* + * If the Guest doesn't want to know, we already restored the + * Floating Point Unit, so we just continue without telling it. + */ if (!cpu->ts) return; break; case 32 ... 255: - /* These values mean a real interrupt occurred, in which case + /* + * These values mean a real interrupt occurred, in which case * the Host handler has already been run. We just do a * friendly check if another process should now be run, then - * return to run the Guest again */ + * return to run the Guest again + */ cond_resched(); return; case LGUEST_TRAP_ENTRY: - /* Our 'struct hcall_args' maps directly over our regs: we set - * up the pointer now to indicate a hypercall is pending. */ + /* + * Our 'struct hcall_args' maps directly over our regs: we set + * up the pointer now to indicate a hypercall is pending. + */ cpu->hcall = (struct hcall_args *)cpu->regs; return; } /* We didn't handle the trap, so it needs to go to the Guest. */ if (!deliver_trap(cpu, cpu->regs->trapnum)) - /* If the Guest doesn't have a handler (either it hasn't + /* + * If the Guest doesn't have a handler (either it hasn't * registered any yet, or it's one of the faults we don't let - * it handle), it dies with this cryptic error message. */ + * it handle), it dies with this cryptic error message. + */ kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", cpu->regs->trapnum, cpu->regs->eip, cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault : cpu->regs->errcode); } -/* Now we can look at each of the routines this calls, in increasing order of +/* + * Now we can look at each of the routines this calls, in increasing order of * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), * deliver_trap() and demand_page(). After all those, we'll be ready to * examine the Switcher, and our philosophical understanding of the Host/Guest - * duality will be complete. :*/ + * duality will be complete. +:*/ static void adjust_pge(void *on) { if (on) @@ -439,13 +515,16 @@ static void adjust_pge(void *on) write_cr4(read_cr4() & ~X86_CR4_PGE); } -/*H:020 Now the Switcher is mapped and every thing else is ready, we need to do - * some more i386-specific initialization. */ +/*H:020 + * Now the Switcher is mapped and every thing else is ready, we need to do + * some more i386-specific initialization. 
+ */ void __init lguest_arch_host_init(void) { int i; - /* Most of the i386/switcher.S doesn't care that it's been moved; on + /* + * Most of the i386/switcher.S doesn't care that it's been moved; on * Intel, jumps are relative, and it doesn't access any references to * external code or data. * @@ -453,7 +532,8 @@ void __init lguest_arch_host_init(void) * addresses are placed in a table (default_idt_entries), so we need to * update the table with the new addresses. switcher_offset() is a * convenience function which returns the distance between the - * compiled-in switcher code and the high-mapped copy we just made. */ + * compiled-in switcher code and the high-mapped copy we just made. + */ for (i = 0; i < IDT_ENTRIES; i++) default_idt_entries[i] += switcher_offset(); @@ -468,63 +548,81 @@ void __init lguest_arch_host_init(void) for_each_possible_cpu(i) { /* lguest_pages() returns this CPU's two pages. */ struct lguest_pages *pages = lguest_pages(i); - /* This is a convenience pointer to make the code fit one - * statement to a line. */ + /* This is a convenience pointer to make the code neater. */ struct lguest_ro_state *state = &pages->state; - /* The Global Descriptor Table: the Host has a different one + /* + * The Global Descriptor Table: the Host has a different one * for each CPU. We keep a descriptor for the GDT which says * where it is and how big it is (the size is actually the last - * byte, not the size, hence the "-1"). */ + * byte, not the size, hence the "-1"). + */ state->host_gdt_desc.size = GDT_SIZE-1; state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); - /* All CPUs on the Host use the same Interrupt Descriptor + /* + * All CPUs on the Host use the same Interrupt Descriptor * Table, so we just use store_idt(), which gets this CPU's IDT - * descriptor. */ + * descriptor. + */ store_idt(&state->host_idt_desc); - /* The descriptors for the Guest's GDT and IDT can be filled + /* + * The descriptors for the Guest's GDT and IDT can be filled * out now, too. We copy the GDT & IDT into ->guest_gdt and - * ->guest_idt before actually running the Guest. */ + * ->guest_idt before actually running the Guest. + */ state->guest_idt_desc.size = sizeof(state->guest_idt)-1; state->guest_idt_desc.address = (long)&state->guest_idt; state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; state->guest_gdt_desc.address = (long)&state->guest_gdt; - /* We know where we want the stack to be when the Guest enters + /* + * We know where we want the stack to be when the Guest enters * the Switcher: in pages->regs. The stack grows upwards, so - * we start it at the end of that structure. */ + * we start it at the end of that structure. + */ state->guest_tss.sp0 = (long)(&pages->regs + 1); - /* And this is the GDT entry to use for the stack: we keep a - * couple of special LGUEST entries. */ + /* + * And this is the GDT entry to use for the stack: we keep a + * couple of special LGUEST entries. + */ state->guest_tss.ss0 = LGUEST_DS; - /* x86 can have a finegrained bitmap which indicates what I/O + /* + * x86 can have a finegrained bitmap which indicates what I/O * ports the process can use. We set it to the end of our - * structure, meaning "none". */ + * structure, meaning "none". + */ state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); - /* Some GDT entries are the same across all Guests, so we can - * set them up now. */ + /* + * Some GDT entries are the same across all Guests, so we can + * set them up now. 
+ */ setup_default_gdt_entries(state); /* Most IDT entries are the same for all Guests, too.*/ setup_default_idt_entries(state, default_idt_entries); - /* The Host needs to be able to use the LGUEST segments on this - * CPU, too, so put them in the Host GDT. */ + /* + * The Host needs to be able to use the LGUEST segments on this + * CPU, too, so put them in the Host GDT. + */ get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; } - /* In the Switcher, we want the %cs segment register to use the + /* + * In the Switcher, we want the %cs segment register to use the * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so * it will be undisturbed when we switch. To change %cs and jump we - * need this structure to feed to Intel's "lcall" instruction. */ + * need this structure to feed to Intel's "lcall" instruction. + */ lguest_entry.offset = (long)switch_to_guest + switcher_offset(); lguest_entry.segment = LGUEST_CS; - /* Finally, we need to turn off "Page Global Enable". PGE is an + /* + * Finally, we need to turn off "Page Global Enable". PGE is an * optimization where page table entries are specially marked to show * they never change. The Host kernel marks all the kernel pages this * way because it's always present, even when userspace is running. @@ -534,16 +632,21 @@ void __init lguest_arch_host_init(void) * you'll get really weird bugs that you'll chase for two days. * * I used to turn PGE off every time we switched to the Guest and back - * on when we return, but that slowed the Switcher down noticibly. */ + * on when we return, but that slowed the Switcher down noticibly. + */ - /* We don't need the complexity of CPUs coming and going while we're - * doing this. */ + /* + * We don't need the complexity of CPUs coming and going while we're + * doing this. + */ get_online_cpus(); if (cpu_has_pge) { /* We have a broader idea of "global". */ /* Remember that this was originally set (for cleanup). */ cpu_had_pge = 1; - /* adjust_pge is a helper function which sets or unsets the PGE - * bit on its CPU, depending on the argument (0 == unset). */ + /* + * adjust_pge is a helper function which sets or unsets the PGE + * bit on its CPU, depending on the argument (0 == unset). + */ on_each_cpu(adjust_pge, (void *)0, 1); /* Turn off the feature in the global feature set. */ clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); @@ -590,26 +693,32 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) { u32 tsc_speed; - /* The pointer to the Guest's "struct lguest_data" is the only argument. - * We check that address now. */ + /* + * The pointer to the Guest's "struct lguest_data" is the only argument. + * We check that address now. + */ if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, sizeof(*cpu->lg->lguest_data))) return -EFAULT; - /* Having checked it, we simply set lg->lguest_data to point straight + /* + * Having checked it, we simply set lg->lguest_data to point straight * into the Launcher's memory at the right place and then use * copy_to_user/from_user from now on, instead of lgread/write. I put * this in to show that I'm not immune to writing stupid - * optimizations. */ + * optimizations. + */ cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; - /* We insist that the Time Stamp Counter exist and doesn't change with + /* + * We insist that the Time Stamp Counter exist and doesn't change with * cpu frequency. Some devious chip manufacturers decided that TSC * changes could be handled in software. 
I decided that time going * backwards might be good for benchmarks, but it's bad for users. * * We also insist that the TSC be stable: the kernel detects unreliable - * TSCs for its own purposes, and we use that here. */ + * TSCs for its own purposes, and we use that here. + */ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) tsc_speed = tsc_khz; else @@ -625,38 +734,47 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) } /*:*/ -/*L:030 lguest_arch_setup_regs() +/*L:030 + * lguest_arch_setup_regs() * * Most of the Guest's registers are left alone: we used get_zeroed_page() to - * allocate the structure, so they will be 0. */ + * allocate the structure, so they will be 0. + */ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) { struct lguest_regs *regs = cpu->regs; - /* There are four "segment" registers which the Guest needs to boot: + /* + * There are four "segment" registers which the Guest needs to boot: * The "code segment" register (cs) refers to the kernel code segment * __KERNEL_CS, and the "data", "extra" and "stack" segment registers * refer to the kernel data segment __KERNEL_DS. * * The privilege level is packed into the lower bits. The Guest runs - * at privilege level 1 (GUEST_PL).*/ + * at privilege level 1 (GUEST_PL). + */ regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; regs->cs = __KERNEL_CS|GUEST_PL; - /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002) + /* + * The "eflags" register contains miscellaneous flags. Bit 1 (0x002) * is supposed to always be "1". Bit 9 (0x200) controls whether * interrupts are enabled. We always leave interrupts enabled while - * running the Guest. */ + * running the Guest. + */ regs->eflags = X86_EFLAGS_IF | 0x2; - /* The "Extended Instruction Pointer" register says where the Guest is - * running. */ + /* + * The "Extended Instruction Pointer" register says where the Guest is + * running. + */ regs->eip = start; - /* %esi points to our boot information, at physical address 0, so don't - * touch it. */ + /* + * %esi points to our boot information, at physical address 0, so don't + * touch it. + */ - /* There are a couple of GDT entries the Guest expects when first - * booting. */ + /* There are a couple of GDT entries the Guest expects at boot. */ setup_guest_gdt(cpu); } diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 3fc15318a80..40634b0db9f 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S @@ -1,12 +1,15 @@ -/*P:900 This is the Switcher: code which sits at 0xFFC00000 astride both the - * Host and Guest to do the low-level Guest<->Host switch. It is as simple as - * it can be made, but it's naturally very specific to x86. +/*P:900 + * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride + * both the Host and Guest to do the low-level Guest<->Host switch. It is as + * simple as it can be made, but it's naturally very specific to x86. * * You have now completed Preparation. If this has whet your appetite; if you * are feeling invigorated and refreshed then the next, more challenging stage - * can be found in "make Guest". :*/ + * can be found in "make Guest". + :*/ -/*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must +/*M:012 + * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must * gain at least 1% more performance. 
Since neither LOC nor performance can be * measured beforehand, it generally means implementing a feature then deciding * if it's worth it. And once it's implemented, who can say no? @@ -31,11 +34,14 @@ * Host (which is actually really easy). * * Two questions remain. Would the performance gain outweigh the complexity? - * And who would write the verse documenting it? :*/ + * And who would write the verse documenting it? +:*/ -/*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their +/*M:011 + * Lguest64 handles NMI. This gave me NMI envy (until I looked at their * code). It's worth doing though, since it would let us use oprofile in the - * Host when a Guest is running. :*/ + * Host when a Guest is running. +:*/ /*S:100 * Welcome to the Switcher itself! diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 6e149f4a1ff..a0f68386c12 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; +#ifdef CONFIG_PCI + /* Set the DMA ops to the ones from the PCI device, this could be + * fishy if we didn't know that on PowerMac it's always direct ops + * or iommu ops that will work fine + */ + dev->ofdev.dev.archdata.dma_ops = + chip->lbus.pdev->dev.archdata.dma_ops; + dev->ofdev.dev.archdata.dma_data = + chip->lbus.pdev->dev.archdata.dma_data; +#endif /* CONFIG_PCI */ + #ifdef DEBUG printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c index 4d686c0bdea..9ab5b0c34f0 100644 --- a/drivers/macintosh/via-maciisi.c +++ b/drivers/macintosh/via-maciisi.c @@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req) } /* This could be BAD... 
when the ADB controller doesn't respond * for this long, it's probably not coming back :-( */ - if(count >= 50) /* Hopefully shouldn't happen */ + if (count > 50) /* Hopefully shouldn't happen */ printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 9933eb861c7..ed103816401 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -776,7 +776,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) * But don't wait if split was due to the io size restriction */ if (unlikely(out_of_pages)) - congestion_wait(WRITE, HZ/100); + congestion_wait(BLK_RW_ASYNC, HZ/100); /* * With async crypto it is unsafe to share the crypto context @@ -1318,7 +1318,7 @@ static int crypt_iterate_devices(struct dm_target *ti, { struct crypt_config *cc = ti->private; - return fn(ti, cc->dev, cc->start, data); + return fn(ti, cc->dev, cc->start, ti->len, data); } static struct target_type crypt_target = { diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 4e5b843cd4d..ebe7381f47c 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -324,12 +324,12 @@ static int delay_iterate_devices(struct dm_target *ti, struct delay_c *dc = ti->private; int ret = 0; - ret = fn(ti, dc->dev_read, dc->start_read, data); + ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data); if (ret) goto out; if (dc->dev_write) - ret = fn(ti, dc->dev_write, dc->start_write, data); + ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data); out: return ret; diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index c3ae51584b1..556acff3952 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store, */ chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); + return dm_exception_store_set_chunk_size(store, chunk_size_ulong, + error); +} + +int dm_exception_store_set_chunk_size(struct dm_exception_store *store, + unsigned long chunk_size_ulong, + char **error) +{ /* Check chunk_size is a power of 2 */ if (!is_power_of_2(chunk_size_ulong)) { *error = "Chunk size is not a power of 2"; @@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store, return -EINVAL; } + if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) { + *error = "Chunk size is too high"; + return -EINVAL; + } + store->chunk_size = chunk_size_ulong; store->chunk_mask = chunk_size_ulong - 1; store->chunk_shift = ffs(chunk_size_ulong) - 1; @@ -195,7 +208,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, struct dm_exception_store **store) { int r = 0; - struct dm_exception_store_type *type; + struct dm_exception_store_type *type = NULL; struct dm_exception_store *tmp_store; char persistent; @@ -211,12 +224,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, } persistent = toupper(*argv[1]); - if (persistent != 'P' && persistent != 'N') { + if (persistent == 'P') + type = get_type("P"); + else if (persistent == 'N') + type = get_type("N"); + else { ti->error = "Persistent flag is not P or N"; return -EINVAL; } - type = get_type(&persistent); if (!type) { ti->error = "Exception store type not recognised"; r = -EINVAL; diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 2442c8c0789..812c71872ba 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -168,6 +168,10 @@ static inline chunk_t 
sector_to_chunk(struct dm_exception_store *store, int dm_exception_store_type_register(struct dm_exception_store_type *type); int dm_exception_store_type_unregister(struct dm_exception_store_type *type); +int dm_exception_store_set_chunk_size(struct dm_exception_store *store, + unsigned long chunk_size_ulong, + char **error); + int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, unsigned *args_used, struct dm_exception_store **store); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 9184b6deb86..82f7d6e6b1e 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -139,7 +139,7 @@ static int linear_iterate_devices(struct dm_target *ti, { struct linear_c *lc = ti->private; - return fn(ti, lc->dev, lc->start, data); + return fn(ti, lc->dev, lc->start, ti->len, data); } static struct target_type linear_target = { diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index e69b9656099..6e186b1a062 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c @@ -21,6 +21,7 @@ struct log_c { struct dm_target *ti; uint32_t region_size; region_t region_count; + uint64_t luid; char uuid[DM_UUID_LEN]; char *usr_argv_str; @@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid, * restored. */ retry: - r = dm_consult_userspace(uuid, request_type, data, + r = dm_consult_userspace(uuid, lc->luid, request_type, data, data_size, rdata, rdata_size); if (r != -ESRCH) @@ -74,14 +75,15 @@ retry: set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(2*HZ); DMWARN("Attempting to contact userspace log server..."); - r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, + r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR, + lc->usr_argv_str, strlen(lc->usr_argv_str) + 1, NULL, NULL); if (!r) break; } DMINFO("Reconnected to userspace log server... 
DM_ULOG_CTR complete"); - r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, + r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL, 0, NULL, NULL); if (!r) goto retry; @@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti, return -ENOMEM; } - for (i = 0, str_size = 0; i < argc; i++) - str_size += sprintf(str + str_size, "%s ", argv[i]); - str_size += sprintf(str + str_size, "%llu", - (unsigned long long)ti->len); + str_size = sprintf(str, "%llu", (unsigned long long)ti->len); + for (i = 0; i < argc; i++) + str_size += sprintf(str + str_size, " %s", argv[i]); *ctr_str = str; return str_size; @@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, return -ENOMEM; } + /* The ptr value is sufficient for local unique id */ + lc->luid = (uint64_t)lc; + lc->ti = ti; if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { @@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, } /* Send table string */ - r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, ctr_str, str_size, NULL, NULL); if (r == -ESRCH) { @@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, /* Since the region size does not change, get it now */ rdata_size = sizeof(rdata); - r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE, NULL, 0, (char *)&rdata, &rdata_size); if (r) { @@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log) int r; struct log_c *lc = log->context; - r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, NULL, 0, NULL, NULL); @@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log) int r; struct log_c *lc = log->context; - r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND, NULL, 0, NULL, NULL); @@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log) int r; struct log_c *lc = log->context; - r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND, NULL, 0, NULL, NULL); @@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log) struct log_c *lc = log->context; lc->in_sync_hint = 0; - r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, + r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME, NULL, 0, NULL, NULL); @@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, char *result, unsigned maxlen) { int r = 0; + char *table_args; size_t sz = (size_t)maxlen; struct log_c *lc = log->context; @@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, break; case STATUSTYPE_TABLE: sz = 0; - DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, - lc->uuid, lc->usr_argv_str); + table_args = strstr(lc->usr_argv_str, " "); + BUG_ON(!table_args); /* There will always be a ' ' */ + table_args++; + + DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc, + lc->uuid, table_args); break; } return (r) ? 
0 : (int)sz; diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 0ca1ee768a1..ba0edad2d04 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -108,7 +108,7 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) *(pkg->data_size) = 0; } else if (tfr->data_size > *(pkg->data_size)) { DMERR("Insufficient space to receive package [%u] " - "(%u vs %lu)", tfr->request_type, + "(%u vs %zu)", tfr->request_type, tfr->data_size, *(pkg->data_size)); *(pkg->data_size) = 0; @@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data) /** * dm_consult_userspace - * @uuid: log's uuid (must be DM_UUID_LEN in size) + * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size) + * @luid: log's local unique identifier * @request_type: found in include/linux/dm-log-userspace.h * @data: data to tx to the server * @data_size: size of data in bytes @@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data) * * Returns: 0 on success, -EXXX on failure **/ -int dm_consult_userspace(const char *uuid, int request_type, +int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, char *data, size_t data_size, char *rdata, size_t *rdata_size) { @@ -190,6 +191,7 @@ resend: memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); memcpy(tfr->uuid, uuid, DM_UUID_LEN); + tfr->luid = luid; tfr->seq = dm_ulog_seq++; /* diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h index c26d8e4e271..04ee874f915 100644 --- a/drivers/md/dm-log-userspace-transfer.h +++ b/drivers/md/dm-log-userspace-transfer.h @@ -11,7 +11,7 @@ int dm_ulog_tfr_init(void); void dm_ulog_tfr_exit(void); -int dm_consult_userspace(const char *uuid, int request_type, +int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, char *data, size_t data_size, char *rdata, size_t *rdata_size); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c70604a2089..6f0d90d4a54 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1453,7 +1453,7 @@ static int multipath_iterate_devices(struct dm_target *ti, list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(p, &pg->pgpaths, list) { - ret = fn(ti, p->path.dev, ti->begin, data); + ret = fn(ti, p->path.dev, ti->begin, ti->len, data); if (ret) goto out; } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ce8868c768c..33f179e66bf 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -638,6 +638,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) spin_lock_irq(&ms->lock); bio_list_merge(&ms->writes, &requeue); spin_unlock_irq(&ms->lock); + delayed_wake(ms); } /* @@ -647,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) */ dm_rh_inc_pending(ms->rh, &sync); dm_rh_inc_pending(ms->rh, &nosync); - ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; + + /* + * If the flush fails on a previous call and succeeds here, + * we must not reset the log_failure variable. We need + * userspace interaction to do that. + */ + ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure; /* * Dispatch io. 
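(Aside: the log_failure change above is easy to model outside the kernel. The toy below is not dm code; record_flush() and flush_error are hypothetical stand-ins for dm_rh_flush() and its return value, and it only shows why the flag must stay latched once set.)

/* Toy model of the dm-raid1 log_failure change: once a flush has failed,
 * the error must persist until userspace clears it, even if a later flush
 * succeeds.
 */
#include <stdio.h>

static int log_failure;			/* sticky error flag */

static void record_flush(int flush_error)
{
	/* The old form, log_failure = flush_error ? 1 : 0, would clear it here. */
	log_failure = flush_error ? 1 : log_failure;
}

int main(void)
{
	record_flush(1);	/* a flush fails: latch the error */
	record_flush(0);	/* a later flush succeeds: error must persist */
	printf("log_failure = %d\n", log_failure);	/* prints 1 */
	return 0;
}

The earlier "? 1 : 0" form would have reset the flag on the next successful flush, hiding a failure that only userspace interaction is supposed to acknowledge.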
@@ -1292,7 +1299,7 @@ static int mirror_iterate_devices(struct dm_target *ti, for (i = 0; !ret && i < ms->nr_mirrors; i++) ret = fn(ti, ms->mirror[i].dev, - ms->mirror[i].offset, data); + ms->mirror[i].offset, ti->len, data); return ret; } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 6e3fe4f1493..d5b2e08750d 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -106,6 +106,13 @@ struct pstore { void *zero_area; /* + * An area used for header. The header can be written + * concurrently with metadata (when invalidating the snapshot), + * so it needs a separate buffer. + */ + void *header_area; + + /* * Used to keep track of which metadata area the data in * 'chunk' refers to. */ @@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps) */ ps->area = vmalloc(len); if (!ps->area) - return r; + goto err_area; ps->zero_area = vmalloc(len); - if (!ps->zero_area) { - vfree(ps->area); - return r; - } + if (!ps->zero_area) + goto err_zero_area; memset(ps->zero_area, 0, len); + ps->header_area = vmalloc(len); + if (!ps->header_area) + goto err_header_area; + return 0; + +err_header_area: + vfree(ps->zero_area); + +err_zero_area: + vfree(ps->area); + +err_area: + return r; } static void free_area(struct pstore *ps) @@ -169,6 +187,10 @@ static void free_area(struct pstore *ps) if (ps->zero_area) vfree(ps->zero_area); ps->zero_area = NULL; + + if (ps->header_area) + vfree(ps->header_area); + ps->header_area = NULL; } struct mdata_req { @@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work) /* * Read or write a chunk aligned and sized block of data from a device. */ -static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) +static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, + int metadata) { struct dm_io_region where = { .bdev = ps->store->cow->bdev, @@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) struct dm_io_request io_req = { .bi_rw = rw, .mem.type = DM_IO_VMA, - .mem.ptr.vma = ps->area, + .mem.ptr.vma = area, .client = ps->io_client, .notify.fn = NULL, }; @@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw) chunk = area_location(ps, ps->current_area); - r = chunk_io(ps, chunk, rw, 0); + r = chunk_io(ps, ps->area, chunk, rw, 0); if (r) return r; @@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps) static int zero_disk_area(struct pstore *ps, chunk_t area) { - struct dm_io_region where = { - .bdev = ps->store->cow->bdev, - .sector = ps->store->chunk_size * area_location(ps, area), - .count = ps->store->chunk_size, - }; - struct dm_io_request io_req = { - .bi_rw = WRITE, - .mem.type = DM_IO_VMA, - .mem.ptr.vma = ps->zero_area, - .client = ps->io_client, - .notify.fn = NULL, - }; - - return dm_io(&io_req, 1, &where, NULL); + return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); } static int read_header(struct pstore *ps, int *new_snapshot) @@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) struct disk_header *dh; chunk_t chunk_size; int chunk_size_supplied = 1; + char *chunk_err; /* * Use default chunk size (or hardsect_size, if larger) if none supplied @@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot) if (r) return r; - r = chunk_io(ps, 0, READ, 1); + r = chunk_io(ps, ps->header_area, 0, READ, 1); if (r) goto bad; - dh = (struct disk_header *) ps->area; + dh = ps->header_area; if (le32_to_cpu(dh->magic) == 0) { 
*new_snapshot = 1; @@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot) ps->version = le32_to_cpu(dh->version); chunk_size = le32_to_cpu(dh->chunk_size); - if (!chunk_size_supplied || ps->store->chunk_size == chunk_size) + if (ps->store->chunk_size == chunk_size) return 0; - DMWARN("chunk size %llu in device metadata overrides " - "table chunk size of %llu.", - (unsigned long long)chunk_size, - (unsigned long long)ps->store->chunk_size); + if (chunk_size_supplied) + DMWARN("chunk size %llu in device metadata overrides " + "table chunk size of %llu.", + (unsigned long long)chunk_size, + (unsigned long long)ps->store->chunk_size); /* We had a bogus chunk_size. Fix stuff up. */ free_area(ps); - ps->store->chunk_size = chunk_size; - ps->store->chunk_mask = chunk_size - 1; - ps->store->chunk_shift = ffs(chunk_size) - 1; + r = dm_exception_store_set_chunk_size(ps->store, chunk_size, + &chunk_err); + if (r) { + DMERR("invalid on-disk chunk size %llu: %s.", + (unsigned long long)chunk_size, chunk_err); + return r; + } r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), ps->io_client); @@ -351,15 +367,15 @@ static int write_header(struct pstore *ps) { struct disk_header *dh; - memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); + memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT); - dh = (struct disk_header *) ps->area; + dh = ps->header_area; dh->magic = cpu_to_le32(SNAP_MAGIC); dh->valid = cpu_to_le32(ps->valid); dh->version = cpu_to_le32(ps->version); dh->chunk_size = cpu_to_le32(ps->store->chunk_size); - return chunk_io(ps, 0, WRITE, 1); + return chunk_io(ps, ps->header_area, 0, WRITE, 1); } /* @@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store, ps->valid = 1; ps->version = SNAPSHOT_DISK_VERSION; ps->area = NULL; + ps->zero_area = NULL; + ps->header_area = NULL; ps->next_free = 2; /* skipping the header and first area */ ps->current_committed = 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index d573165cd2b..57f1bf7f3b7 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, return 0; } +static int snapshot_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct dm_snapshot *snap = ti->private; + + return fn(ti, snap->origin, 0, ti->len, data); +} + + /*----------------------------------------------------------------- * Origin methods *---------------------------------------------------------------*/ @@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result, return 0; } +static int origin_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct dm_dev *dev = ti->private; + + return fn(ti, dev, 0, ti->len, data); +} + static struct target_type origin_target = { .name = "snapshot-origin", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = origin_ctr, .dtr = origin_dtr, .map = origin_map, .resume = origin_resume, .status = origin_status, + .iterate_devices = origin_iterate_devices, }; static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = { .end_io = snapshot_end_io, .resume = snapshot_resume, .status = snapshot_status, + .iterate_devices = 
snapshot_iterate_devices, }; static int __init dm_snapshot_init(void) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index b240e85ae39..3e563d25173 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -320,17 +320,28 @@ static int stripe_iterate_devices(struct dm_target *ti, int ret = 0; unsigned i = 0; - do + do { ret = fn(ti, sc->stripe[i].dev, - sc->stripe[i].physical_start, data); - while (!ret && ++i < sc->stripes); + sc->stripe[i].physical_start, + sc->stripe_width, data); + } while (!ret && ++i < sc->stripes); return ret; } +static void stripe_io_hints(struct dm_target *ti, + struct queue_limits *limits) +{ + struct stripe_c *sc = ti->private; + unsigned chunk_size = (sc->chunk_mask + 1) << 9; + + blk_limits_io_min(limits, chunk_size); + limits->io_opt = chunk_size * sc->stripes; +} + static struct target_type stripe_target = { .name = "striped", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, @@ -338,6 +349,7 @@ static struct target_type stripe_target = { .end_io = stripe_end_io, .status = stripe_status, .iterate_devices = stripe_iterate_devices, + .io_hints = stripe_io_hints, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 4899ebe767c..1a6cb3c7822 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) } /* - * If possible, this checks an area of a destination device is valid. + * If possible, this checks an area of a destination device is invalid. */ -static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, - sector_t start, void *data) +static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; @@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, char b[BDEVNAME_SIZE]; if (!dev_size) - return 1; - - if ((start >= dev_size) || (start + ti->len > dev_size)) { - DMWARN("%s: %s too small for target", - dm_device_name(ti->table->md), bdevname(bdev, b)); return 0; + + if ((start >= dev_size) || (start + len > dev_size)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); + return 1; } if (logical_block_size_sectors <= 1) - return 1; + return 0; if (start & (logical_block_size_sectors - 1)) { DMWARN("%s: start=%llu not aligned to h/w " - "logical block size %hu of %s", + "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)start, limits->logical_block_size, bdevname(bdev, b)); - return 0; + return 1; } - if (ti->len & (logical_block_size_sectors - 1)) { + if (len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " - "logical block size %hu of %s", + "logical block size %u of %s", dm_device_name(ti->table->md), - (unsigned long long)ti->len, + (unsigned long long)len, limits->logical_block_size, bdevname(bdev, b)); - return 0; + return 1; } - return 1; + return 0; } /* @@ -482,7 +486,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? 
l : min(l, r)) int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, - sector_t start, void *data) + sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; @@ -495,9 +499,16 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } - if (blk_stack_limits(limits, &q->limits, start) < 0) - DMWARN("%s: target device %s is misaligned", - dm_device_name(ti->table->md), bdevname(bdev, b)); + if (blk_stack_limits(limits, &q->limits, start << 9) < 0) + DMWARN("%s: target device %s is misaligned: " + "physical_block_size=%u, logical_block_size=%u, " + "alignment_offset=%u, start=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), + q->limits.physical_block_size, + q->limits.logical_block_size, + q->limits.alignment_offset, + (unsigned long long) start << 9); + /* * Check if merge fn is supported. @@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " - "not aligned to h/w logical block size %hu", + "not aligned to h/w logical block size %u", dm_device_name(table->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, @@ -830,11 +841,6 @@ unsigned dm_table_get_type(struct dm_table *t) return t->type; } -bool dm_table_bio_based(struct dm_table *t) -{ - return dm_table_get_type(t) == DM_TYPE_BIO_BASED; -} - bool dm_table_request_based(struct dm_table *t) { return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; @@ -1001,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table, ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); + /* Set I/O hints portion of queue limits */ + if (ti->type->io_hints) + ti->type->io_hints(ti, &ti_limits); + /* * Check each device area is consistent with the target's * overall queue limits. 
*/ - if (!ti->type->iterate_devices(ti, device_area_is_valid, - &ti_limits)) + if (ti->type->iterate_devices(ti, device_area_is_invalid, + &ti_limits)) return -EINVAL; combine_limits: diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3c6d4ee8921..b4845b14740 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue) dm_put(md); } +static void free_rq_clone(struct request *clone) +{ + struct dm_rq_target_io *tio = clone->end_io_data; + + blk_rq_unprep_clone(clone); + free_rq_tio(tio); +} + static void dm_unprep_request(struct request *rq) { struct request *clone = rq->special; - struct dm_rq_target_io *tio = clone->end_io_data; rq->special = NULL; rq->cmd_flags &= ~REQ_DONTPREP; - blk_rq_unprep_clone(clone); - free_rq_tio(tio); + free_rq_clone(clone); } /* @@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error) rq->sense_len = clone->sense_len; } - BUG_ON(clone->bio); - free_rq_tio(tio); + free_rq_clone(clone); blk_end_request_all(rq, error); @@ -1017,7 +1022,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_flags |= 1 << BIO_CLONED; if (bio_integrity(bio)) { - bio_integrity_clone(clone, bio, GFP_NOIO); + bio_integrity_clone(clone, bio, GFP_NOIO, bs); bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len); } @@ -1045,7 +1050,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, clone->bi_flags &= ~(1 << BIO_SEG_VALID); if (bio_integrity(bio)) { - bio_integrity_clone(clone, bio, GFP_NOIO); + bio_integrity_clone(clone, bio, GFP_NOIO, bs); if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) bio_integrity_trim(clone, @@ -2203,16 +2208,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; } - /* - * It is enought that blk_queue_ordered() is called only once when - * the first bio-based table is bound. - * - * This setting should be moved to alloc_dev() when request-based dm - * supports barrier. - */ - if (!md->map && dm_table_bio_based(table)) - blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); - __unbind(md); r = __bind(md, table, &limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 23278ae80f0..a7663eba17e 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); int dm_table_any_busy_target(struct dm_table *t); int dm_table_set_type(struct dm_table *t); unsigned dm_table_get_type(struct dm_table *t); -bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 15c8b7b25a9..5fe39c2a3d2 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) rdev->sectors = sectors * mddev->chunk_sectors; } - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. 
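The dm-stripe and dm-table hunks above widen the iterate_devices callback so a target reports the length of the area it uses on each underlying device, not just where that area starts. A minimal sketch of a callback with the new shape, assuming 512-byte sectors; example_area_check is an illustrative name, not something added by the patch:

#include <linux/fs.h>
#include <linux/device-mapper.h>

/* Illustrative callback: return non-zero if the (start, len) area does
 * not fit on the device, mirroring device_area_is_invalid() above. */
static int example_area_check(struct dm_target *ti, struct dm_dev *dev,
                              sector_t start, sector_t len, void *data)
{
        /* device size in 512-byte sectors */
        sector_t dev_size = i_size_read(dev->bdev->bd_inode) >> 9;

        if (!dev_size)          /* size unknown: nothing to check */
                return 0;

        return start >= dev_size || start + len > dev_size;
}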
@@ -220,6 +220,7 @@ static int linear_run (mddev_t *mddev) mddev->queue->unplug_fn = linear_unplug; mddev->queue->backing_dev_info.congested_fn = linear_congested; mddev->queue->backing_dev_info.congested_data = mddev; + md_integrity_register(mddev); return 0; } @@ -256,6 +257,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) rcu_assign_pointer(mddev->private, newconf); md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); call_rcu(&oldconf->rcu, free_conf); return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 09be637d52c..9dd872000ce 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -359,6 +359,7 @@ static mddev_t * mddev_find(dev_t unit) else new->md_minor = MINOR(unit) >> MdpMinorShift; + mutex_init(&new->open_mutex); mutex_init(&new->reconfig_mutex); INIT_LIST_HEAD(&new->disks); INIT_LIST_HEAD(&new->all_mddevs); @@ -1308,7 +1309,12 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) } if (mddev->level != LEVEL_MULTIPATH) { int role; - role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); + if (rdev->desc_nr < 0 || + rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { + role = 0xffff; + rdev->desc_nr = -1; + } else + role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); switch(role) { case 0xffff: /* spare */ break; @@ -1394,8 +1400,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) if (rdev2->desc_nr+1 > max_dev) max_dev = rdev2->desc_nr+1; - if (max_dev > le32_to_cpu(sb->max_dev)) + if (max_dev > le32_to_cpu(sb->max_dev)) { + int bmask; sb->max_dev = cpu_to_le32(max_dev); + rdev->sb_size = max_dev * 2 + 256; + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; + if (rdev->sb_size & bmask) + rdev->sb_size = (rdev->sb_size | bmask) + 1; + } for (i=0; i<max_dev;i++) sb->dev_roles[i] = cpu_to_le16(0xfffe); @@ -1487,37 +1499,76 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) static LIST_HEAD(pending_raid_disks); -static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev) +/* + * Try to register data integrity profile for an mddev + * + * This is called when an array is started and after a disk has been kicked + * from the array. It only succeeds if all working and active component devices + * are integrity capable with matching profiles. + */ +int md_integrity_register(mddev_t *mddev) +{ + mdk_rdev_t *rdev, *reference = NULL; + + if (list_empty(&mddev->disks)) + return 0; /* nothing to do */ + if (blk_get_integrity(mddev->gendisk)) + return 0; /* already registered */ + list_for_each_entry(rdev, &mddev->disks, same_set) { + /* skip spares and non-functional disks */ + if (test_bit(Faulty, &rdev->flags)) + continue; + if (rdev->raid_disk < 0) + continue; + /* + * If at least one rdev is not integrity capable, we can not + * enable data integrity for the md device. + */ + if (!bdev_get_integrity(rdev->bdev)) + return -EINVAL; + if (!reference) { + /* Use the first rdev as the reference */ + reference = rdev; + continue; + } + /* does this rdev's profile match the reference profile? */ + if (blk_integrity_compare(reference->bdev->bd_disk, + rdev->bdev->bd_disk) < 0) + return -EINVAL; + } + /* + * All component devices are integrity capable and have matching + * profiles, register the common profile for the md device. 
+ */ + if (blk_integrity_register(mddev->gendisk, + bdev_get_integrity(reference->bdev)) != 0) { + printk(KERN_ERR "md: failed to register integrity for %s\n", + mdname(mddev)); + return -EINVAL; + } + printk(KERN_NOTICE "md: data integrity on %s enabled\n", + mdname(mddev)); + return 0; +} +EXPORT_SYMBOL(md_integrity_register); + +/* Disable data integrity if non-capable/non-matching disk is being added */ +void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) { - struct mdk_personality *pers = mddev->pers; - struct gendisk *disk = mddev->gendisk; struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); - struct blk_integrity *bi_mddev = blk_get_integrity(disk); + struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); - /* Data integrity passthrough not supported on RAID 4, 5 and 6 */ - if (pers && pers->level >= 4 && pers->level <= 6) + if (!bi_mddev) /* nothing to do */ return; - - /* If rdev is integrity capable, register profile for mddev */ - if (!bi_mddev && bi_rdev) { - if (blk_integrity_register(disk, bi_rdev)) - printk(KERN_ERR "%s: %s Could not register integrity!\n", - __func__, disk->disk_name); - else - printk(KERN_NOTICE "Enabling data integrity on %s\n", - disk->disk_name); + if (rdev->raid_disk < 0) /* skip spares */ return; - } - - /* Check that mddev and rdev have matching profiles */ - if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) { - printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__, - disk->disk_name, rdev->bdev->bd_disk->disk_name); - printk(KERN_NOTICE "Disabling data integrity on %s\n", - disk->disk_name); - blk_integrity_unregister(disk); - } + if (bi_rdev && blk_integrity_compare(mddev->gendisk, + rdev->bdev->bd_disk) >= 0) + return; + printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); + blk_integrity_unregister(mddev->gendisk); } +EXPORT_SYMBOL(md_integrity_add_rdev); static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) { @@ -1591,7 +1642,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) /* May as well allow recovery to be retried once */ mddev->recovery_disabled = 0; - md_integrity_check(rdev, mddev); return 0; fail: @@ -1756,9 +1806,10 @@ static void print_sb_1(struct mdp_superblock_1 *sb) __u8 *uuid; uuid = sb->set_uuid; - printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" - ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" - KERN_INFO "md: Name: \"%s\" CT:%llu\n", + printk(KERN_INFO + "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" + ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" + "md: Name: \"%s\" CT:%llu\n", le32_to_cpu(sb->major_version), le32_to_cpu(sb->feature_map), uuid[0], uuid[1], uuid[2], uuid[3], @@ -1770,12 +1821,13 @@ static void print_sb_1(struct mdp_superblock_1 *sb) & MD_SUPERBLOCK_1_TIME_SEC_MASK); uuid = sb->device_uuid; - printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" + printk(KERN_INFO + "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" " RO:%llu\n" - KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" - ":%02x%02x%02x%02x%02x%02x\n" - KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" - KERN_INFO "md: (MaxDev:%u) \n", + "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" + ":%02x%02x%02x%02x%02x%02x\n" + "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" + "md: (MaxDev:%u) \n", le32_to_cpu(sb->level), (unsigned long long)le64_to_cpu(sb->size), le32_to_cpu(sb->raid_disks), 
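md_integrity_register() and md_integrity_add_rdev() above replace the old per-rdev md_integrity_check(); the multipath, raid0, raid1 and raid10 hunks later in this diff call them in the same three places. A condensed sketch of that calling pattern for a hypothetical personality (the example_* names are illustrative and error handling is elided):

#include "md.h"         /* mddev_t, mdk_rdev_t, the md_integrity_* helpers */

static int example_run(mddev_t *mddev)
{
        /* ... assemble the array, set up the queue ... */
        md_integrity_register(mddev);       /* succeeds only if all members match */
        return 0;
}

static int example_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
        /* ... hook the new device into the array ... */
        md_integrity_add_rdev(rdev, mddev); /* may disable integrity again */
        return 0;
}

static int example_remove_disk(mddev_t *mddev, int number)
{
        /* ... detach the device ... */
        md_integrity_register(mddev);       /* retry: remaining disks may now match */
        return 0;
}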
@@ -1923,17 +1975,14 @@ repeat: /* otherwise we have to go forward and ... */ mddev->events ++; if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ - /* .. if the array isn't clean, insist on an odd 'events' */ - if ((mddev->events&1)==0) { - mddev->events++; + /* .. if the array isn't clean, an 'even' event must also go + * to spares. */ + if ((mddev->events&1)==0) nospares = 0; - } } else { - /* otherwise insist on an even 'events' (for clean states) */ - if ((mddev->events&1)) { - mddev->events++; + /* otherwise an 'odd' event must go to spares */ + if ((mddev->events&1)) nospares = 0; - } } } @@ -2655,6 +2704,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) ssize_t rv = len; struct mdk_personality *pers; void *priv; + mdk_rdev_t *rdev; if (mddev->pers == NULL) { if (len == 0) @@ -2734,6 +2784,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len) mddev_suspend(mddev); mddev->pers->stop(mddev); module_put(mddev->pers->owner); + /* Invalidate devices that are now superfluous */ + list_for_each_entry(rdev, &mddev->disks, same_set) + if (rdev->raid_disk >= mddev->raid_disks) { + rdev->raid_disk = -1; + clear_bit(In_sync, &rdev->flags); + } mddev->pers = pers; mddev->private = priv; strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); @@ -3543,6 +3599,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) if (max < mddev->resync_min) return -EINVAL; if (max < mddev->resync_max && + mddev->ro == 0 && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; @@ -3573,7 +3630,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) char *e; unsigned long long new = simple_strtoull(buf, &e, 10); - if (mddev->pers->quiesce == NULL) + if (mddev->pers == NULL || + mddev->pers->quiesce == NULL) return -EINVAL; if (buf == e || (*e && *e != '\n')) return -EINVAL; @@ -3601,7 +3659,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) char *e; unsigned long long new = simple_strtoull(buf, &e, 10); - if (mddev->pers->quiesce == NULL) + if (mddev->pers == NULL || + mddev->pers->quiesce == NULL) return -EINVAL; if (buf == e || (*e && *e != '\n')) return -EINVAL; @@ -3681,17 +3740,8 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) mddev->array_sectors = sectors; set_capacity(mddev->gendisk, mddev->array_sectors); - if (mddev->pers) { - struct block_device *bdev = bdget_disk(mddev->gendisk, 0); - - if (bdev) { - mutex_lock(&bdev->bd_inode->i_mutex); - i_size_write(bdev->bd_inode, - (loff_t)mddev->array_sectors << 9); - mutex_unlock(&bdev->bd_inode->i_mutex); - bdput(bdev); - } - } + if (mddev->pers) + revalidate_disk(mddev->gendisk); return len; } @@ -3844,11 +3894,9 @@ static int md_alloc(dev_t dev, char *name) flush_scheduled_work(); mutex_lock(&disks_mutex); - if (mddev->gendisk) { - mutex_unlock(&disks_mutex); - mddev_put(mddev); - return -EEXIST; - } + error = -EEXIST; + if (mddev->gendisk) + goto abort; if (name) { /* Need to ensure that 'name' is not a duplicate. 
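The md_alloc() change above folds three separate unlock-and-return error paths into a single abort: label with one mutex_unlock(). The same idiom reduced to a skeleton; the lock, flag and allocation below are stand-ins, not code from md.c:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);
static bool example_exists;
static void *example_resource;

static int example_alloc(void)
{
        int error;

        mutex_lock(&example_lock);

        error = -EEXIST;
        if (example_exists)
                goto abort;

        error = -ENOMEM;
        example_resource = kzalloc(16, GFP_KERNEL);
        if (!example_resource)
                goto abort;

        error = 0;      /* success and failure share the unlock below */
abort:
        mutex_unlock(&example_lock);
        return error;
}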
@@ -3860,17 +3908,15 @@ static int md_alloc(dev_t dev, char *name) if (mddev2->gendisk && strcmp(mddev2->gendisk->disk_name, name) == 0) { spin_unlock(&all_mddevs_lock); - return -EEXIST; + goto abort; } spin_unlock(&all_mddevs_lock); } + error = -ENOMEM; mddev->queue = blk_alloc_queue(GFP_KERNEL); - if (!mddev->queue) { - mutex_unlock(&disks_mutex); - mddev_put(mddev); - return -ENOMEM; - } + if (!mddev->queue) + goto abort; mddev->queue->queuedata = mddev; /* Can be unlocked because the queue is new: no concurrency */ @@ -3880,11 +3926,9 @@ static int md_alloc(dev_t dev, char *name) disk = alloc_disk(1 << shift); if (!disk) { - mutex_unlock(&disks_mutex); blk_cleanup_queue(mddev->queue); mddev->queue = NULL; - mddev_put(mddev); - return -ENOMEM; + goto abort; } disk->major = MAJOR(mddev->unit); disk->first_minor = unit << shift; @@ -3906,16 +3950,22 @@ static int md_alloc(dev_t dev, char *name) mddev->gendisk = disk; error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk_to_dev(disk)->kobj, "%s", "md"); - mutex_unlock(&disks_mutex); - if (error) + if (error) { + /* This isn't possible, but as kobject_init_and_add is marked + * __must_check, we must do something with the result + */ printk(KERN_WARNING "md: cannot register %s/md - name in use\n", disk->disk_name); - else { + error = 0; + } + abort: + mutex_unlock(&disks_mutex); + if (!error) { kobject_uevent(&mddev->kobj, KOBJ_ADD); mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); } mddev_put(mddev); - return 0; + return error; } static struct kobject *md_probe(dev_t dev, int *part, void *data) @@ -4044,10 +4094,6 @@ static int do_md_run(mddev_t * mddev) } strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); - if (pers->level >= 4 && pers->level <= 6) - /* Cannot support integrity (yet) */ - blk_integrity_unregister(mddev->gendisk); - if (mddev->reshape_position != MaxSector && pers->start_reshape == NULL) { /* This personality cannot handle reshaping... 
*/ @@ -4185,6 +4231,7 @@ static int do_md_run(mddev_t * mddev) md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ + revalidate_disk(mddev->gendisk); mddev->changed = 1; md_new_event(mddev); sysfs_notify_dirent(mddev->sysfs_state); @@ -4256,12 +4303,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) struct gendisk *disk = mddev->gendisk; mdk_rdev_t *rdev; + mutex_lock(&mddev->open_mutex); if (atomic_read(&mddev->openers) > is_open) { printk("md: %s still in use.\n",mdname(mddev)); - return -EBUSY; - } - - if (mddev->pers) { + err = -EBUSY; + } else if (mddev->pers) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -4318,8 +4364,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) if (mode == 1) set_disk_ro(disk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + err = 0; } - +out: + mutex_unlock(&mddev->open_mutex); + if (err) + return err; /* * Free resources if final stop */ @@ -4385,7 +4435,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) blk_integrity_unregister(disk); md_new_event(mddev); sysfs_notify_dirent(mddev->sysfs_state); -out: return err; } @@ -5083,18 +5132,8 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) return -ENOSPC; } rv = mddev->pers->resize(mddev, num_sectors); - if (!rv) { - struct block_device *bdev; - - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - mutex_lock(&bdev->bd_inode->i_mutex); - i_size_write(bdev->bd_inode, - (loff_t)mddev->array_sectors << 9); - mutex_unlock(&bdev->bd_inode->i_mutex); - bdput(bdev); - } - } + if (!rv) + revalidate_disk(mddev->gendisk); return rv; } @@ -5480,12 +5519,12 @@ static int md_open(struct block_device *bdev, fmode_t mode) } BUG_ON(mddev != bdev->bd_disk->private_data); - if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) + if ((err = mutex_lock_interruptible(&mddev->open_mutex))) goto out; err = 0; atomic_inc(&mddev->openers); - mddev_unlock(mddev); + mutex_unlock(&mddev->open_mutex); check_disk_change(bdev); out: @@ -6334,10 +6373,16 @@ void md_do_sync(mddev_t *mddev) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - if (j >= mddev->resync_max) - wait_event(mddev->recovery_wait, - mddev->resync_max > j - || kthread_should_stop()); + while (j >= mddev->resync_max && !kthread_should_stop()) { + /* As this condition is controlled by user-space, + * we can block indefinitely, so use '_interruptible' + * to avoid triggering warnings. + */ + flush_signals(current); /* just in case */ + wait_event_interruptible(mddev->recovery_wait, + mddev->resync_max > j + || kthread_should_stop()); + } if (kthread_should_stop()) goto interrupted; diff --git a/drivers/md/md.h b/drivers/md/md.h index 9430a110db9..f8fc188bc76 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -223,6 +223,16 @@ struct mddev_s * so we don't loop trying */ int in_sync; /* know to not need resync */ + /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so + * that we are never stopping an array while it is open. + * 'reconfig_mutex' protects all other reconfiguration. + * These locks are separate due to conflicting interactions + * with bdev->bd_mutex. + * Lock ordering is: + * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk + * bd_mutex -> open_mutex: e.g. 
__blkdev_get -> md_open + */ + struct mutex open_mutex; struct mutex reconfig_mutex; atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ @@ -431,5 +441,7 @@ extern int md_allow_write(mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); extern int md_check_no_bitmap(mddev_t *mddev); +extern int md_integrity_register(mddev_t *mddev); +void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); #endif /* _MD_MD_H */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index cbe368fa659..7140909f666 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) for (path = first; path <= last; path++) if ((p=conf->multipaths+path)->rdev == NULL) { q = rdev->bdev->bd_disk->queue; - blk_queue_stack_limits(mddev->queue, q); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as @@ -312,6 +313,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) set_bit(In_sync, &rdev->flags); rcu_assign_pointer(p->rdev, rdev); err = 0; + md_integrity_add_rdev(rdev, mddev); break; } @@ -344,7 +346,9 @@ static int multipath_remove_disk(mddev_t *mddev, int number) /* lost the race, try later */ err = -EBUSY; p->rdev = rdev; + goto abort; } + md_integrity_register(mddev); } abort: @@ -463,9 +467,9 @@ static int multipath_run (mddev_t *mddev) disk = conf->multipaths + disk_idx; disk->rdev = rdev; + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); /* as we don't honour merge_bvec_fn, we must never risk * violating it, not that we ever expect a device with * a merge_bvec_fn to be involved in multipath */ @@ -518,7 +522,7 @@ static int multipath_run (mddev_t *mddev) mddev->queue->unplug_fn = multipath_unplug; mddev->queue->backing_dev_info.congested_fn = multipath_congested; mddev->queue->backing_dev_info.congested_data = mddev; - + md_integrity_register(mddev); return 0; out_free_conf: diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ab4a489d869..898e2bdfee4 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev) } dev[j] = rdev1; - blk_queue_stack_limits(mddev->queue, - rdev1->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev1->bdev, + rdev1->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. 
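The open_mutex field documented in md.h above is what lets do_md_stop() and md_open() (changed earlier in this diff) exclude each other without taking reconfig_mutex under bd_mutex. A condensed sketch of that interlock; example_open/example_stop stand in for the real md_open()/do_md_stop(), with all teardown detail elided:

#include <linux/errno.h>
#include <linux/mutex.h>
#include "md.h"         /* mddev_t with open_mutex and openers */

static int example_open(mddev_t *mddev)
{
        int err = mutex_lock_interruptible(&mddev->open_mutex);

        if (err)
                return err;
        atomic_inc(&mddev->openers);    /* count us before anyone can stop it */
        mutex_unlock(&mddev->open_mutex);
        return 0;
}

static int example_stop(mddev_t *mddev, int is_open)
{
        int err = 0;

        mutex_lock(&mddev->open_mutex);
        if (atomic_read(&mddev->openers) > is_open)
                err = -EBUSY;           /* someone else still has it open */
        /* else: safe to tear the array down while holding open_mutex */
        mutex_unlock(&mddev->open_mutex);
        return err;
}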
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev) mddev->chunk_sectors << 9); goto abort; } + + blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); + blk_queue_io_opt(mddev->queue, + (mddev->chunk_sectors << 9) * mddev->raid_disks); + printk(KERN_INFO "raid0: done.\n"); mddev->private = conf; return 0; @@ -346,6 +351,7 @@ static int raid0_run(mddev_t *mddev) blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); dump_zones(mddev); + md_integrity_register(mddev); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 89939a7aef5..8726fd7ebce 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) for (mirror = first; mirror <= last; mirror++) if ( !(p=conf->mirrors+mirror)->rdev) { - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. @@ -1144,7 +1144,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) rcu_assign_pointer(p->rdev, rdev); break; } - + md_integrity_add_rdev(rdev, mddev); print_conf(conf); return err; } @@ -1178,7 +1178,9 @@ static int raid1_remove_disk(mddev_t *mddev, int number) /* lost the race, try later */ err = -EBUSY; p->rdev = rdev; + goto abort; } + md_integrity_register(mddev); } abort: @@ -1988,9 +1990,8 @@ static int run(mddev_t *mddev) disk = conf->mirrors + disk_idx; disk->rdev = rdev; - - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. @@ -2068,7 +2069,7 @@ static int run(mddev_t *mddev) mddev->queue->unplug_fn = raid1_unplug; mddev->queue->backing_dev_info.congested_fn = raid1_congested; mddev->queue->backing_dev_info.congested_data = mddev; - + md_integrity_register(mddev); return 0; out_no_mem: @@ -2133,6 +2134,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) return -EINVAL; set_capacity(mddev->gendisk, mddev->array_sectors); mddev->changed = 1; + revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->dev_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ae12ceafe10..3d9020cf6f6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) for ( ; mirror <= last ; mirror++) if ( !(p=conf->mirrors+mirror)->rdev) { - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. 
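The raid0 hunk above, like the dm-stripe, raid10 and raid5 hunks elsewhere in this diff, derives the new I/O topology hints the same way: io_min is one chunk in bytes, io_opt is one full stripe. A standalone sketch of that calculation; example_set_stripe_hints, chunk_sectors and data_disks are illustrative parameters, not fields added by the patch:

#include <linux/blkdev.h>

static void example_set_stripe_hints(struct request_queue *q,
                                     unsigned int chunk_sectors,
                                     unsigned int data_disks)
{
        unsigned int chunk_bytes = chunk_sectors << 9;  /* sectors -> bytes */

        /* smallest request that avoids read-modify-write on one member */
        blk_queue_io_min(q, chunk_bytes);
        /* a full stripe: the preferred granularity for large writes */
        blk_queue_io_opt(q, chunk_bytes * data_disks);
}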
@@ -1170,6 +1170,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) break; } + md_integrity_add_rdev(rdev, mddev); print_conf(conf); return err; } @@ -1203,7 +1204,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number) /* lost the race, try later */ err = -EBUSY; p->rdev = rdev; + goto abort; } + md_integrity_register(mddev); } abort: @@ -2044,7 +2047,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) static int run(mddev_t *mddev) { conf_t *conf; - int i, disk_idx; + int i, disk_idx, chunk_size; mirror_info_t *disk; mdk_rdev_t *rdev; int nc, fc, fo; @@ -2130,6 +2133,14 @@ static int run(mddev_t *mddev) spin_lock_init(&conf->device_lock); mddev->queue->queue_lock = &conf->device_lock; + chunk_size = mddev->chunk_sectors << 9; + blk_queue_io_min(mddev->queue, chunk_size); + if (conf->raid_disks % conf->near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks); + else + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->raid_disks / conf->near_copies)); + list_for_each_entry(rdev, &mddev->disks, same_set) { disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks @@ -2138,9 +2149,8 @@ static int run(mddev_t *mddev) disk = conf->mirrors + disk_idx; disk->rdev = rdev; - - blk_queue_stack_limits(mddev->queue, - rdev->bdev->bd_disk->queue); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. @@ -2218,6 +2228,7 @@ static int run(mddev_t *mddev) if (conf->near_copies < mddev->raid_disks) blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); + md_integrity_register(mddev); return 0; out_free_conf: diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f9f991e6e13..b8a2c5dc67b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi) goto retry; } } - /* FIXME what if we get a false positive because these - * are being updated. - */ - if (logical_sector >= mddev->suspend_lo && + + if (bio_data_dir(bi) == WRITE && + logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { release_stripe(sh); - schedule(); + /* As the suspend_* range is controlled by + * userspace, we want an interruptible + * wait. 
+ */ + flush_signals(current); + prepare_to_wait(&conf->wait_for_overlap, + &w, TASK_INTERRUPTIBLE); + if (logical_sector >= mddev->suspend_lo && + logical_sector < mddev->suspend_hi) + schedule(); goto retry; } @@ -3777,7 +3785,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped conf->reshape_progress < raid5_size(mddev, 0, 0)) { sector_nr = raid5_size(mddev, 0, 0) - conf->reshape_progress; - } else if (mddev->delta_disks > 0 && + } else if (mddev->delta_disks >= 0 && conf->reshape_progress > 0) sector_nr = conf->reshape_progress; sector_div(sector_nr, new_data_disks); @@ -3991,6 +3999,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski return 0; } + /* Allow raid5_quiesce to complete */ + wait_event(conf->wait_for_overlap, conf->quiesce != 2); + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) return reshape_request(mddev, sector_nr, skipped); @@ -4308,6 +4319,15 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) return sectors * (raid_disks - conf->max_degraded); } +static void free_conf(raid5_conf_t *conf) +{ + shrink_stripes(conf); + safe_put_page(conf->spare_page); + kfree(conf->disks); + kfree(conf->stripe_hashtbl); + kfree(conf); +} + static raid5_conf_t *setup_conf(mddev_t *mddev) { raid5_conf_t *conf; @@ -4439,11 +4459,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) abort: if (conf) { - shrink_stripes(conf); - safe_put_page(conf->spare_page); - kfree(conf->disks); - kfree(conf->stripe_hashtbl); - kfree(conf); + free_conf(conf); return ERR_PTR(-EIO); } else return ERR_PTR(-ENOMEM); @@ -4452,7 +4468,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) static int run(mddev_t *mddev) { raid5_conf_t *conf; - int working_disks = 0; + int working_disks = 0, chunk_size; mdk_rdev_t *rdev; if (mddev->recovery_cp != MaxSector) @@ -4493,7 +4509,26 @@ static int run(mddev_t *mddev) (old_disks-max_degraded)); /* here_old is the first stripe that we might need to read * from */ - if (here_new >= here_old) { + if (mddev->delta_disks == 0) { + /* We cannot be sure it is safe to start an in-place + * reshape. It is only safe if user-space if monitoring + * and taking constant backups. + * mdadm always starts a situation like this in + * readonly mode so it can take control before + * allowing any writes. So just check for that. + */ + if ((here_new * mddev->new_chunk_sectors != + here_old * mddev->chunk_sectors) || + mddev->ro == 0) { + printk(KERN_ERR "raid5: in-place reshape must be started" + " in read-only mode - aborting\n"); + return -EINVAL; + } + } else if (mddev->delta_disks < 0 + ? 
(here_new * mddev->new_chunk_sectors <= + here_old * mddev->chunk_sectors) + : (here_new * mddev->new_chunk_sectors >= + here_old * mddev->chunk_sectors)) { /* Reading from the same stripe as writing to - bad */ printk(KERN_ERR "raid5: reshape_position too early for " "auto-recovery - aborting.\n"); @@ -4607,18 +4642,22 @@ static int run(mddev_t *mddev) md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); + chunk_size = mddev->chunk_sectors << 9; + blk_queue_io_min(mddev->queue, chunk_size); + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->raid_disks - conf->max_degraded)); + + list_for_each_entry(rdev, &mddev->disks, same_set) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); return 0; abort: md_unregister_thread(mddev->thread); mddev->thread = NULL; if (conf) { - shrink_stripes(conf); print_raid5_conf(conf); - safe_put_page(conf->spare_page); - kfree(conf->disks); - kfree(conf->stripe_hashtbl); - kfree(conf); + free_conf(conf); } mddev->private = NULL; printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); @@ -4633,13 +4672,10 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; - shrink_stripes(conf); - kfree(conf->stripe_hashtbl); mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); - kfree(conf->disks); - kfree(conf); + free_conf(conf); mddev->private = NULL; return 0; } @@ -4841,6 +4877,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) return -EINVAL; set_capacity(mddev->gendisk, mddev->array_sectors); mddev->changed = 1; + revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->dev_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -4986,7 +5023,7 @@ static int raid5_start_reshape(mddev_t *mddev) spin_unlock_irqrestore(&conf->device_lock, flags); } mddev->raid_disks = conf->raid_disks; - mddev->reshape_position = 0; + mddev->reshape_position = conf->reshape_progress; set_bit(MD_CHANGE_DEVS, &mddev->flags); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); @@ -5041,7 +5078,6 @@ static void end_reshape(raid5_conf_t *conf) */ static void raid5_finish_reshape(mddev_t *mddev) { - struct block_device *bdev; raid5_conf_t *conf = mddev->private; if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -5050,15 +5086,7 @@ static void raid5_finish_reshape(mddev_t *mddev) md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); mddev->changed = 1; - - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - mutex_lock(&bdev->bd_inode->i_mutex); - i_size_write(bdev->bd_inode, - (loff_t)mddev->array_sectors << 9); - mutex_unlock(&bdev->bd_inode->i_mutex); - bdput(bdev); - } + revalidate_disk(mddev->gendisk); } else { int d; mddev->degraded = conf->raid_disks; @@ -5069,8 +5097,15 @@ static void raid5_finish_reshape(mddev_t *mddev) mddev->degraded--; for (d = conf->raid_disks ; d < conf->raid_disks - mddev->delta_disks; - d++) - raid5_remove_disk(mddev, d); + d++) { + mdk_rdev_t *rdev = conf->disks[d].rdev; + if (rdev && raid5_remove_disk(mddev, d) == 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + rdev->raid_disk = -1; + } + } } mddev->layout = conf->algorithm; mddev->chunk_sectors = conf->chunk_sectors; @@ -5090,12 +5125,18 @@ static void 
raid5_quiesce(mddev_t *mddev, int state) case 1: /* stop all writes */ spin_lock_irq(&conf->device_lock); - conf->quiesce = 1; + /* '2' tells resync/reshape to pause so that all + * active stripes can drain + */ + conf->quiesce = 2; wait_event_lock_irq(conf->wait_for_stripe, atomic_read(&conf->active_stripes) == 0 && atomic_read(&conf->active_aligned_reads) == 0, conf->device_lock, /* nothing */); + conf->quiesce = 1; spin_unlock_irq(&conf->device_lock); + /* allow reshape to continue */ + wake_up(&conf->wait_for_overlap); break; case 0: /* re-enable writes */ diff --git a/drivers/media/common/tuners/qt1010.c b/drivers/media/common/tuners/qt1010.c index 825aa1412e6..9f5dba244cb 100644 --- a/drivers/media/common/tuners/qt1010.c +++ b/drivers/media/common/tuners/qt1010.c @@ -64,24 +64,22 @@ static int qt1010_writereg(struct qt1010_priv *priv, u8 reg, u8 val) /* dump all registers */ static void qt1010_dump_regs(struct qt1010_priv *priv) { - char buf[52], buf2[4]; u8 reg, val; for (reg = 0; ; reg++) { if (reg % 16 == 0) { if (reg) - printk("%s\n", buf); - sprintf(buf, "%02x: ", reg); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG "%02x:", reg); } if (qt1010_readreg(priv, reg, &val) == 0) - sprintf(buf2, "%02x ", val); + printk(KERN_CONT " %02x", val); else - strcpy(buf2, "-- "); - strcat(buf, buf2); + printk(KERN_CONT " --"); if (reg == 0x2f) break; } - printk("%s\n", buf); + printk(KERN_CONT "\n"); } static int qt1010_set_params(struct dvb_frontend *fe, diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c index b6da9c3873f..f270e605da8 100644 --- a/drivers/media/common/tuners/tuner-xc2028.c +++ b/drivers/media/common/tuners/tuner-xc2028.c @@ -1096,8 +1096,19 @@ static int xc2028_set_params(struct dvb_frontend *fe, } /* All S-code tables need a 200kHz shift */ - if (priv->ctrl.demod) + if (priv->ctrl.demod) { demod = priv->ctrl.demod + 200; + /* + * The DTV7 S-code table needs a 700 kHz shift. + * Thanks to Terry Wu <terrywu2009@gmail.com> for reporting this + * + * DTV7 is only used in Australia. Germany or Italy may also + * use this firmware after initialization, but a tune to a UHF + * channel should then cause DTV78 to be used. + */ + if (type & DTV7) + demod += 500; + } return generic_set_freq(fe, p->frequency, T_DIGITAL_TV, type, 0, demod); @@ -1108,8 +1119,8 @@ static int xc2028_sleep(struct dvb_frontend *fe) struct xc2028_data *priv = fe->tuner_priv; int rc = 0; - /* Avoid firmware reload on slow devices */ - if (no_poweroff) + /* Avoid firmware reload on slow devices or if PM disabled */ + if (no_poweroff || priv->ctrl.disable_power_mgmt) return 0; tuner_dbg("Putting xc2028/3028 into poweroff mode.\n"); diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h index 19de7928a74..a90c35d50ad 100644 --- a/drivers/media/common/tuners/tuner-xc2028.h +++ b/drivers/media/common/tuners/tuner-xc2028.h @@ -38,6 +38,7 @@ struct xc2028_ctrl { unsigned int input1:1; unsigned int vhfbw7:1; unsigned int uhfbw8:1; + unsigned int disable_power_mgmt:1; unsigned int demod; enum firmware_type type:2; }; diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index efb4a6c2b57..9a6307a347b 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c @@ -20,8 +20,14 @@ #include "tuner-simple.h" #include "stv0297.h" + +/* Can we use the specified front-end? 
Remember that if we are compiled + * into the kernel we can't call code that's in modules. */ +#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \ + (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE))) + /* lnb control */ -#if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) +#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct flexcop_device *fc = fe->dvb->priv; @@ -49,8 +55,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage } #endif -#if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \ - || defined(CONFIG_DVB_MT312_MODULE) +#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) static int flexcop_sleep(struct dvb_frontend* fe) { struct flexcop_device *fc = fe->dvb->priv; @@ -61,7 +66,7 @@ static int flexcop_sleep(struct dvb_frontend* fe) #endif /* SkyStar2 DVB-S rev 2.3 */ -#if defined(CONFIG_DVB_MT312_MODULE) +#if FE_SUPPORTED(MT312) static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ @@ -193,10 +198,12 @@ static int skystar2_rev23_attach(struct flexcop_device *fc, } return 0; } +#else +#define skystar2_rev23_attach NULL #endif /* SkyStar2 DVB-S rev 2.6 */ -#if defined(CONFIG_DVB_STV0299_MODULE) +#if FE_SUPPORTED(STV0299) static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { @@ -321,10 +328,12 @@ static int skystar2_rev26_attach(struct flexcop_device *fc, } return 0; } +#else +#define skystar2_rev26_attach NULL #endif /* SkyStar2 DVB-S rev 2.7 */ -#if defined(CONFIG_DVB_S5H1420_MODULE) +#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000) static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { .demod_address = 0x53, .invert = 1, @@ -385,10 +394,12 @@ fail: fc->fc_i2c_adap[0].no_base_addr = 0; return 0; } +#else +#define skystar2_rev27_attach NULL #endif /* SkyStar2 rev 2.8 */ -#if defined(CONFIG_DVB_CX24123_MODULE) +#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113) static struct cx24123_config skystar2_rev2_8_cx24123_config = { .demod_address = 0x55, .dont_use_pll = 1, @@ -433,10 +444,12 @@ static int skystar2_rev28_attach(struct flexcop_device *fc, * IR-receiver (PIC16F818) - but the card has no input for that ??? 
*/ return 1; } +#else +#define skystar2_rev28_attach NULL #endif /* AirStar DVB-T */ -#if defined(CONFIG_DVB_MT352_MODULE) +#if FE_SUPPORTED(MT352) static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) { static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; @@ -495,10 +508,12 @@ static int airstar_dvbt_attach(struct flexcop_device *fc, } return 0; } +#else +#define airstar_dvbt_attach NULL #endif /* AirStar ATSC 1st generation */ -#if defined(CONFIG_DVB_BCM3510_MODULE) +#if FE_SUPPORTED(BCM3510) static int flexcop_fe_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char* name) { @@ -517,10 +532,12 @@ static int airstar_atsc1_attach(struct flexcop_device *fc, fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); return fc->fe != NULL; } +#else +#define airstar_atsc1_attach NULL #endif /* AirStar ATSC 2nd generation */ -#if defined(CONFIG_DVB_NXT200X_MODULE) +#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL) static struct nxt200x_config samsung_tbmv_config = { .demod_address = 0x0a, }; @@ -535,10 +552,12 @@ static int airstar_atsc2_attach(struct flexcop_device *fc, return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV); } +#else +#define airstar_atsc2_attach NULL #endif /* AirStar ATSC 3rd generation */ -#if defined(CONFIG_DVB_LGDT330X_MODULE) +#if FE_SUPPORTED(LGDT330X) static struct lgdt330x_config air2pc_atsc_hd5000_config = { .demod_address = 0x59, .demod_chip = LGDT3303, @@ -556,10 +575,12 @@ static int airstar_atsc3_attach(struct flexcop_device *fc, return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, TUNER_LG_TDVS_H06XF); } +#else +#define airstar_atsc3_attach NULL #endif /* CableStar2 DVB-C */ -#if defined(CONFIG_DVB_STV0297_MODULE) +#if FE_SUPPORTED(STV0297) static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { @@ -698,39 +719,23 @@ static int cablestar2_attach(struct flexcop_device *fc, fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; return 1; } +#else +#define cablestar2_attach NULL #endif static struct { flexcop_device_type_t type; int (*attach)(struct flexcop_device *, struct i2c_adapter *); } flexcop_frontends[] = { -#if defined(CONFIG_DVB_S5H1420_MODULE) { FC_SKY_REV27, skystar2_rev27_attach }, -#endif -#if defined(CONFIG_DVB_CX24123_MODULE) { FC_SKY_REV28, skystar2_rev28_attach }, -#endif -#if defined(CONFIG_DVB_STV0299_MODULE) { FC_SKY_REV26, skystar2_rev26_attach }, -#endif -#if defined(CONFIG_DVB_MT352_MODULE) { FC_AIR_DVBT, airstar_dvbt_attach }, -#endif -#if defined(CONFIG_DVB_NXT200X_MODULE) { FC_AIR_ATSC2, airstar_atsc2_attach }, -#endif -#if defined(CONFIG_DVB_LGDT330X_MODULE) { FC_AIR_ATSC3, airstar_atsc3_attach }, -#endif -#if defined(CONFIG_DVB_BCM3510_MODULE) { FC_AIR_ATSC1, airstar_atsc1_attach }, -#endif -#if defined(CONFIG_DVB_STV0297_MODULE) { FC_CABLE, cablestar2_attach }, -#endif -#if defined(CONFIG_DVB_MT312_MODULE) { FC_SKY_REV23, skystar2_rev23_attach }, -#endif }; /* try to figure out the frontend */ @@ -738,6 +743,8 @@ int flexcop_frontend_init(struct flexcop_device *fc) { int i; for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { + if (!flexcop_frontends[i].attach) + continue; /* type needs to be set before, because of some workarounds * done based on the probed card type */ fc->dev_type = flexcop_frontends[i].type; diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c index 4601b059b2b..0e246eaad05 100644 --- a/drivers/media/dvb/bt8xx/dst_ca.c +++ 
b/drivers/media/dvb/bt8xx/dst_ca.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/dvb/ca.h> #include "dvbdev.h" diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h index 79927305e84..487919bea7a 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.h +++ b/drivers/media/dvb/dvb-core/dvbdev.h @@ -27,7 +27,6 @@ #include <linux/poll.h> #include <linux/fs.h> #include <linux/list.h> -#include <linux/smp_lock.h> #define DVB_MAJOR 212 diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c index 4cb31e7c13c..26690dfb326 100644 --- a/drivers/media/dvb/dvb-usb/af9015.c +++ b/drivers/media/dvb/dvb-usb/af9015.c @@ -81,7 +81,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req) switch (req->cmd) { case GET_CONFIG: - case BOOT: case READ_MEMORY: case RECONNECT_USB: case GET_IR_CODE: @@ -100,6 +99,7 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req) case WRITE_VIRTUAL_MEMORY: case COPY_FIRMWARE: case DOWNLOAD_FIRMWARE: + case BOOT: break; default: err("unknown command:%d", req->cmd); diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c index 136c5863d81..12e018b4107 100644 --- a/drivers/media/dvb/frontends/af9013.c +++ b/drivers/media/dvb/frontends/af9013.c @@ -527,6 +527,10 @@ static int af9013_set_ofdm_params(struct af9013_state *state, u8 i, buf[3] = {0, 0, 0}; *auto_mode = 0; /* set if parameters are requested to auto set */ + /* Try auto-detect transmission parameters in case of AUTO requested or + garbage parameters given by application for compatibility. + MPlayer seems to provide garbage parameters currently. */ + switch (params->transmission_mode) { case TRANSMISSION_MODE_AUTO: *auto_mode = 1; @@ -536,7 +540,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (1 << 0); break; default: - return -EINVAL; + deb_info("%s: invalid transmission_mode\n", __func__); + *auto_mode = 1; } switch (params->guard_interval) { @@ -554,7 +559,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (3 << 2); break; default: - return -EINVAL; + deb_info("%s: invalid guard_interval\n", __func__); + *auto_mode = 1; } switch (params->hierarchy_information) { @@ -572,7 +578,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (3 << 4); break; default: - return -EINVAL; + deb_info("%s: invalid hierarchy_information\n", __func__); + *auto_mode = 1; }; switch (params->constellation) { @@ -587,7 +594,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[1] |= (2 << 6); break; default: - return -EINVAL; + deb_info("%s: invalid constellation\n", __func__); + *auto_mode = 1; } /* Use HP. How and which case we can switch to LP? 
*/ @@ -611,7 +619,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[2] |= (4 << 0); break; default: - return -EINVAL; + deb_info("%s: invalid code_rate_HP\n", __func__); + *auto_mode = 1; } switch (params->code_rate_LP) { @@ -638,7 +647,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, if (params->hierarchy_information == HIERARCHY_AUTO) break; default: - return -EINVAL; + deb_info("%s: invalid code_rate_LP\n", __func__); + *auto_mode = 1; } switch (params->bandwidth) { @@ -651,7 +661,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[1] |= (2 << 2); break; default: - return -EINVAL; + deb_info("%s: invalid bandwidth\n", __func__); + buf[1] |= (2 << 2); /* cannot auto-detect BW, try 8 MHz */ } /* program */ diff --git a/drivers/media/dvb/frontends/cx22700.c b/drivers/media/dvb/frontends/cx22700.c index ace5cb17165..fbd838eca26 100644 --- a/drivers/media/dvb/frontends/cx22700.c +++ b/drivers/media/dvb/frontends/cx22700.c @@ -380,7 +380,7 @@ struct dvb_frontend* cx22700_attach(const struct cx22700_config* config, struct cx22700_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct cx22700_state), GFP_KERNEL); + state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c index 5d1abe34bdd..00b5c7e91d5 100644 --- a/drivers/media/dvb/frontends/cx22702.c +++ b/drivers/media/dvb/frontends/cx22702.c @@ -580,7 +580,7 @@ struct dvb_frontend *cx22702_attach(const struct cx22702_config *config, struct cx22702_state *state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct cx22702_state), GFP_KERNEL); + state = kzalloc(sizeof(struct cx22702_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c index 87ae29db024..ffbcfabd83f 100644 --- a/drivers/media/dvb/frontends/cx24110.c +++ b/drivers/media/dvb/frontends/cx24110.c @@ -598,7 +598,7 @@ struct dvb_frontend* cx24110_attach(const struct cx24110_config* config, int ret; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct cx24110_state), GFP_KERNEL); + state = kzalloc(sizeof(struct cx24110_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/dvb_dummy_fe.c b/drivers/media/dvb/frontends/dvb_dummy_fe.c index db8a937cc63..a7fc7e53a55 100644 --- a/drivers/media/dvb/frontends/dvb_dummy_fe.c +++ b/drivers/media/dvb/frontends/dvb_dummy_fe.c @@ -117,7 +117,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void) struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); + state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (state == NULL) goto error; /* create dvb_frontend */ @@ -137,7 +137,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void) struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); + state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (state == NULL) goto error; /* create dvb_frontend */ @@ -157,7 +157,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void) struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct 
dvb_dummy_fe_state), GFP_KERNEL); + state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (state == NULL) goto error; /* create dvb_frontend */ diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c index e1e70e9e0cb..3051b64aa17 100644 --- a/drivers/media/dvb/frontends/l64781.c +++ b/drivers/media/dvb/frontends/l64781.c @@ -501,7 +501,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config, { .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct l64781_state), GFP_KERNEL); + state = kzalloc(sizeof(struct l64781_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/lgs8gl5.c b/drivers/media/dvb/frontends/lgs8gl5.c index 855852fddf2..bb37ed289a0 100644 --- a/drivers/media/dvb/frontends/lgs8gl5.c +++ b/drivers/media/dvb/frontends/lgs8gl5.c @@ -387,7 +387,7 @@ lgs8gl5_attach(const struct lgs8gl5_config *config, struct i2c_adapter *i2c) dprintk("%s\n", __func__); /* Allocate memory for the internal state */ - state = kmalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL); + state = kzalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c index a621f727935..f69daaac78c 100644 --- a/drivers/media/dvb/frontends/mt312.c +++ b/drivers/media/dvb/frontends/mt312.c @@ -782,7 +782,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config, struct mt312_state *state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL); + state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c index 0eef22dbf8a..a763ec756f7 100644 --- a/drivers/media/dvb/frontends/nxt6000.c +++ b/drivers/media/dvb/frontends/nxt6000.c @@ -545,7 +545,7 @@ struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config, struct nxt6000_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct nxt6000_state), GFP_KERNEL); + state = kzalloc(sizeof(struct nxt6000_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c index 8133ea3cddd..38e67accb8c 100644 --- a/drivers/media/dvb/frontends/or51132.c +++ b/drivers/media/dvb/frontends/or51132.c @@ -562,7 +562,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config, struct or51132_state* state = NULL; /* Allocate memory for the internal state */ - state = kmalloc(sizeof(struct or51132_state), GFP_KERNEL); + state = kzalloc(sizeof(struct or51132_state), GFP_KERNEL); if (state == NULL) return NULL; diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c index 16cf2fdd5d7..c709ce6771c 100644 --- a/drivers/media/dvb/frontends/or51211.c +++ b/drivers/media/dvb/frontends/or51211.c @@ -527,7 +527,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config, struct or51211_state* state = NULL; /* Allocate memory for the internal state */ - state = kmalloc(sizeof(struct or51211_state), GFP_KERNEL); + state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL); if (state == NULL) return NULL; diff --git a/drivers/media/dvb/frontends/s5h1409.c 
b/drivers/media/dvb/frontends/s5h1409.c index 3e08d985d6e..fb301151842 100644 --- a/drivers/media/dvb/frontends/s5h1409.c +++ b/drivers/media/dvb/frontends/s5h1409.c @@ -796,7 +796,7 @@ struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config, u16 reg; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct s5h1409_state), GFP_KERNEL); + state = kzalloc(sizeof(struct s5h1409_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c index 66e2dd6d6fe..d8adf1e3201 100644 --- a/drivers/media/dvb/frontends/s5h1411.c +++ b/drivers/media/dvb/frontends/s5h1411.c @@ -844,7 +844,7 @@ struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config, u16 reg; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL); + state = kzalloc(sizeof(struct s5h1411_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c index 0bd16af8a6c..9552a22ccff 100644 --- a/drivers/media/dvb/frontends/si21xx.c +++ b/drivers/media/dvb/frontends/si21xx.c @@ -928,7 +928,7 @@ struct dvb_frontend *si21xx_attach(const struct si21xx_config *config, dprintk("%s\n", __func__); /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct si21xx_state), GFP_KERNEL); + state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c index 1c9a9b4051b..b85eb60a893 100644 --- a/drivers/media/dvb/frontends/sp8870.c +++ b/drivers/media/dvb/frontends/sp8870.c @@ -557,7 +557,7 @@ struct dvb_frontend* sp8870_attach(const struct sp8870_config* config, struct sp8870_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct sp8870_state), GFP_KERNEL); + state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c index 559509ab4da..4a7c3d84260 100644 --- a/drivers/media/dvb/frontends/sp887x.c +++ b/drivers/media/dvb/frontends/sp887x.c @@ -557,7 +557,7 @@ struct dvb_frontend* sp887x_attach(const struct sp887x_config* config, struct sp887x_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct sp887x_state), GFP_KERNEL); + state = kzalloc(sizeof(struct sp887x_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c index ff1194de34c..2930a5d6768 100644 --- a/drivers/media/dvb/frontends/stv0288.c +++ b/drivers/media/dvb/frontends/stv0288.c @@ -570,7 +570,7 @@ struct dvb_frontend *stv0288_attach(const struct stv0288_config *config, int id; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct stv0288_state), GFP_KERNEL); + state = kzalloc(sizeof(struct stv0288_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c index 62caf802ed9..4fd7479bb62 100644 --- a/drivers/media/dvb/frontends/stv0297.c +++ b/drivers/media/dvb/frontends/stv0297.c @@ -663,7 +663,7 @@ struct dvb_frontend *stv0297_attach(const struct stv0297_config *config, struct stv0297_state *state = NULL; /* allocate memory for the internal state */ - state = 
kmalloc(sizeof(struct stv0297_state), GFP_KERNEL); + state = kzalloc(sizeof(struct stv0297_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c index 6c1cb1973c6..96887446972 100644 --- a/drivers/media/dvb/frontends/stv0299.c +++ b/drivers/media/dvb/frontends/stv0299.c @@ -667,7 +667,7 @@ struct dvb_frontend* stv0299_attach(const struct stv0299_config* config, int id; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct stv0299_state), GFP_KERNEL); + state = kzalloc(sizeof(struct stv0299_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c index f648fdb64bb..f5d7b3277a2 100644 --- a/drivers/media/dvb/frontends/tda10021.c +++ b/drivers/media/dvb/frontends/tda10021.c @@ -413,7 +413,7 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config, u8 id; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda10021_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c index cc8862ce4aa..4e2a7c8b2f6 100644 --- a/drivers/media/dvb/frontends/tda10048.c +++ b/drivers/media/dvb/frontends/tda10048.c @@ -1095,7 +1095,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config, dprintk(1, "%s()\n", __func__); /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda10048_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda10048_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c index 4981cef8b44..f2a8abe0a24 100644 --- a/drivers/media/dvb/frontends/tda1004x.c +++ b/drivers/media/dvb/frontends/tda1004x.c @@ -1269,7 +1269,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config, int id; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); if (!state) { printk(KERN_ERR "Can't alocate memory for tda10045 state\n"); return NULL; @@ -1339,7 +1339,7 @@ struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config, int id; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); if (!state) { printk(KERN_ERR "Can't alocate memory for tda10046 state\n"); return NULL; diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c index a17ce3c4ad8..f2c8faac6f3 100644 --- a/drivers/media/dvb/frontends/tda10086.c +++ b/drivers/media/dvb/frontends/tda10086.c @@ -745,7 +745,7 @@ struct dvb_frontend* tda10086_attach(const struct tda10086_config* config, dprintk ("%s\n", __func__); /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda10086_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda10086_state), GFP_KERNEL); if (!state) return NULL; diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c index 5b843b2e67e..9369f7442f2 100644 --- a/drivers/media/dvb/frontends/tda8083.c +++ b/drivers/media/dvb/frontends/tda8083.c @@ -417,7 +417,7 @@ struct dvb_frontend* tda8083_attach(const struct 
tda8083_config* config, struct tda8083_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct tda8083_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tda8083_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/ves1820.c b/drivers/media/dvb/frontends/ves1820.c index a184597f1d9..6e78e486551 100644 --- a/drivers/media/dvb/frontends/ves1820.c +++ b/drivers/media/dvb/frontends/ves1820.c @@ -374,7 +374,7 @@ struct dvb_frontend* ves1820_attach(const struct ves1820_config* config, struct ves1820_state* state = NULL; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct ves1820_state), GFP_KERNEL); + state = kzalloc(sizeof(struct ves1820_state), GFP_KERNEL); if (state == NULL) goto error; diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c index bd558960bd8..8d7854c2fb0 100644 --- a/drivers/media/dvb/frontends/ves1x93.c +++ b/drivers/media/dvb/frontends/ves1x93.c @@ -456,7 +456,7 @@ struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config, u8 identity; /* allocate memory for the internal state */ - state = kmalloc(sizeof(struct ves1x93_state), GFP_KERNEL); + state = kzalloc(sizeof(struct ves1x93_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c index 148b6f7f6cb..66f5c1fb307 100644 --- a/drivers/media/dvb/frontends/zl10353.c +++ b/drivers/media/dvb/frontends/zl10353.c @@ -98,7 +98,6 @@ static int zl10353_read_register(struct zl10353_state *state, u8 reg) static void zl10353_dump_regs(struct dvb_frontend *fe) { struct zl10353_state *state = fe->demodulator_priv; - char buf[52], buf2[4]; int ret; u8 reg; @@ -106,19 +105,18 @@ static void zl10353_dump_regs(struct dvb_frontend *fe) for (reg = 0; ; reg++) { if (reg % 16 == 0) { if (reg) - printk(KERN_DEBUG "%s\n", buf); - sprintf(buf, "%02x: ", reg); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG "%02x:", reg); } ret = zl10353_read_register(state, reg); if (ret >= 0) - sprintf(buf2, "%02x ", (u8)ret); + printk(KERN_CONT " %02x", (u8)ret); else - strcpy(buf2, "-- "); - strcat(buf, buf2); + printk(KERN_CONT " --"); if (reg == 0xff) break; } - printk(KERN_DEBUG "%s\n", buf); + printk(KERN_CONT "\n"); } static void zl10353_calc_nominal_rate(struct dvb_frontend *fe, diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig index dd863f26167..8c1aed77ea3 100644 --- a/drivers/media/dvb/siano/Kconfig +++ b/drivers/media/dvb/siano/Kconfig @@ -2,25 +2,33 @@ # Siano Mobile Silicon Digital TV device configuration # -config DVB_SIANO_SMS1XXX - tristate "Siano SMS1XXX USB dongle support" - depends on DVB_CORE && USB +config SMS_SIANO_MDTV + tristate "Siano SMS1xxx based MDTV receiver" + depends on DVB_CORE && INPUT ---help--- - Choose Y here if you have a USB dongle with a SMS1XXX chipset. + Choose Y or M here if you have MDTV receiver with a Siano chipset. - To compile this driver as a module, choose M here: the - module will be called sms1xxx. + To compile this driver as a module, choose M here + (The module will be called smsmdtv). -config DVB_SIANO_SMS1XXX_SMS_IDS - bool "Enable support for Siano Mobile Silicon default USB IDs" - depends on DVB_SIANO_SMS1XXX - default y - ---help--- - Choose Y here if you have a USB dongle with a SMS1XXX chipset - that uses Siano Mobile Silicon's default usb vid:pid. 
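Editorial note on the kmalloc-to-kzalloc conversions in the frontend attach functions above: the state struct is handed to the demod ops before every field has been explicitly set, so a zeroed allocation keeps any untouched field (cached register shadows, flags) at a predictable 0. A minimal sketch of that attach pattern follows; foo_state, foo_config and foo_ops are hypothetical stand-ins, not any real frontend in this series.

/*
 * Hedged sketch of the attach pattern used by the frontends above.
 * Everything named foo_* is a placeholder, not real driver code.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"

struct foo_config {
	u8 demod_address;
};

struct foo_state {
	struct i2c_adapter *i2c;
	const struct foo_config *config;
	struct dvb_frontend frontend;
	u8 shadow_reg;	/* left at 0 by kzalloc(), no explicit init needed */
};

static const struct dvb_frontend_ops foo_ops = {
	.info = { .name = "hypothetical foo demodulator" },
};

struct dvb_frontend *foo_attach(const struct foo_config *config,
				struct i2c_adapter *i2c)
{
	struct foo_state *state;

	/* zeroed allocation: unset fields are 0, not heap garbage */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->config = config;
	state->i2c = i2c;

	memcpy(&state->frontend.ops, &foo_ops, sizeof(state->frontend.ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;
}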
+ Further documentation on this driver can be found on the WWW + at http://www.siano-ms.com/ + +if SMS_SIANO_MDTV +menu "Siano module components" - Choose N here if you would prefer to use Siano's external driver. +# Hardware interfaces support - Further documentation on this driver can be found on the WWW at - <http://www.siano-ms.com/>. +config SMS_USB_DRV + tristate "USB interface support" + depends on DVB_CORE && USB + ---help--- + Choose if you would like to have Siano's support for USB interface +config SMS_SDIO_DRV + tristate "SDIO interface support" + depends on DVB_CORE && MMC + ---help--- + Choose if you would like to have Siano's support for SDIO interface +endmenu +endif # SMS_SIANO_MDTV diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile index c6644d90943..c54140b5ab5 100644 --- a/drivers/media/dvb/siano/Makefile +++ b/drivers/media/dvb/siano/Makefile @@ -1,8 +1,9 @@ -sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o -obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o -obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o -obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsdvb.o +smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o + +obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o +obj-$(CONFIG_SMS_USB_DRV) += smsusb.o +obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c index d8b15d583bd..0420e2885e7 100644 --- a/drivers/media/dvb/siano/sms-cards.c +++ b/drivers/media/dvb/siano/sms-cards.c @@ -116,99 +116,21 @@ static inline void sms_gpio_assign_11xx_default_led_config( int sms_board_event(struct smscore_device_t *coredev, enum SMS_BOARD_EVENTS gevent) { - int board_id = smscore_get_board_id(coredev); - struct sms_board *board = sms_get_board(board_id); struct smscore_gpio_config MyGpioConfig; sms_gpio_assign_11xx_default_led_config(&MyGpioConfig); switch (gevent) { case BOARD_EVENT_POWER_INIT: /* including hotplug */ - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - /* set I/O and turn off all LEDs */ - smscore_gpio_configure(coredev, - board->board_cfg.leds_power, - &MyGpioConfig); - smscore_gpio_set_level(coredev, - board->board_cfg.leds_power, 0); - smscore_gpio_configure(coredev, board->board_cfg.led0, - &MyGpioConfig); - smscore_gpio_set_level(coredev, - board->board_cfg.led0, 0); - smscore_gpio_configure(coredev, board->board_cfg.led1, - &MyGpioConfig); - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: - /* set I/O and turn off LNA */ - smscore_gpio_configure(coredev, - board->board_cfg.foreign_lna0_ctrl, - &MyGpioConfig); - smscore_gpio_set_level(coredev, - board->board_cfg.foreign_lna0_ctrl, - 0); - break; - } break; /* BOARD_EVENT_BIND */ case BOARD_EVENT_POWER_SUSPEND: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.leds_power, 0); - smscore_gpio_set_level(coredev, - board->board_cfg.led0, 0); - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: - smscore_gpio_set_level(coredev, - board->board_cfg.foreign_lna0_ctrl, - 0); - break; - } break; /* BOARD_EVENT_POWER_SUSPEND */ case BOARD_EVENT_POWER_RESUME: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - 
board->board_cfg.leds_power, 1); - smscore_gpio_set_level(coredev, - board->board_cfg.led0, 1); - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: - smscore_gpio_set_level(coredev, - board->board_cfg.foreign_lna0_ctrl, - 1); - break; - } break; /* BOARD_EVENT_POWER_RESUME */ case BOARD_EVENT_BIND: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.leds_power, 1); - smscore_gpio_set_level(coredev, - board->board_cfg.led0, 1); - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: - case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: - smscore_gpio_set_level(coredev, - board->board_cfg.foreign_lna0_ctrl, - 1); - break; - } break; /* BOARD_EVENT_BIND */ case BOARD_EVENT_SCAN_PROG: @@ -218,20 +140,8 @@ int sms_board_event(struct smscore_device_t *coredev, case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL: break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */ case BOARD_EVENT_FE_LOCK: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 1); - break; - } break; /* BOARD_EVENT_FE_LOCK */ case BOARD_EVENT_FE_UNLOCK: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - } break; /* BOARD_EVENT_FE_UNLOCK */ case BOARD_EVENT_DEMOD_LOCK: break; /* BOARD_EVENT_DEMOD_LOCK */ @@ -248,20 +158,8 @@ int sms_board_event(struct smscore_device_t *coredev, case BOARD_EVENT_RECEPTION_LOST_0: break; /* BOARD_EVENT_RECEPTION_LOST_0 */ case BOARD_EVENT_MULTIPLEX_OK: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 1); - break; - } break; /* BOARD_EVENT_MULTIPLEX_OK */ case BOARD_EVENT_MULTIPLEX_ERRORS: - switch (board_id) { - case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: - smscore_gpio_set_level(coredev, - board->board_cfg.led1, 0); - break; - } break; /* BOARD_EVENT_MULTIPLEX_ERRORS */ default: diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index a246903c334..bd9ab9d0d12 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c @@ -816,7 +816,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) sms_debug("set device mode to %d", mode); if (coredev->device_flags & SMS_DEVICE_FAMILY2) { - if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_RAW_TUNER) { + if (mode < DEVICE_MODE_DVBT || mode >= DEVICE_MODE_RAW_TUNER) { sms_err("invalid mode specified %d", mode); return -EINVAL; } diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c index 3ee1c3902c5..266033ae278 100644 --- a/drivers/media/dvb/siano/smsdvb.c +++ b/drivers/media/dvb/siano/smsdvb.c @@ -325,6 +325,16 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client, 0 : -ETIME; } +static inline int led_feedback(struct smsdvb_client_t *client) +{ + if (client->fe_status & FE_HAS_LOCK) + return sms_board_led_feedback(client->coredev, + (client->sms_stat_dvb.ReceptionData.BER + == 0) ? 
SMS_LED_HI : SMS_LED_LO); + else + return sms_board_led_feedback(client->coredev, SMS_LED_OFF); +} + static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) { struct smsdvb_client_t *client; @@ -332,6 +342,8 @@ static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) *stat = client->fe_status; + led_feedback(client); + return 0; } @@ -342,6 +354,8 @@ static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber) *ber = client->sms_stat_dvb.ReceptionData.BER; + led_feedback(client); + return 0; } @@ -359,6 +373,8 @@ static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength) (client->sms_stat_dvb.ReceptionData.InBandPwr + 95) * 3 / 2; + led_feedback(client); + return 0; } @@ -369,6 +385,8 @@ static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr) *snr = client->sms_stat_dvb.ReceptionData.SNR; + led_feedback(client); + return 0; } @@ -379,6 +397,8 @@ static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets; + led_feedback(client); + return 0; } @@ -404,6 +424,8 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe, u32 Data[3]; } Msg; + int ret; + client->fe_status = FE_HAS_SIGNAL; client->event_fe_state = -1; client->event_unc_state = -1; @@ -426,6 +448,23 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe, case BANDWIDTH_AUTO: return -EOPNOTSUPP; default: return -EINVAL; } + /* Disable LNA, if any. An error is returned if no LNA is present */ + ret = sms_board_lna_control(client->coredev, 0); + if (ret == 0) { + fe_status_t status; + + /* tune with LNA off at first */ + ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), + &client->tune_done); + + smsdvb_read_status(fe, &status); + + if (status & FE_HAS_LOCK) + return ret; + + /* previous tune didnt lock - enable LNA and tune again */ + sms_board_lna_control(client->coredev, 1); + } return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), &client->tune_done); @@ -451,6 +490,8 @@ static int smsdvb_init(struct dvb_frontend *fe) struct smsdvb_client_t *client = container_of(fe, struct smsdvb_client_t, frontend); + sms_board_power(client->coredev, 1); + sms_board_dvb3_event(client, DVB3_EVENT_INIT); return 0; } @@ -460,6 +501,9 @@ static int smsdvb_sleep(struct dvb_frontend *fe) struct smsdvb_client_t *client = container_of(fe, struct smsdvb_client_t, frontend); + sms_board_led_feedback(client->coredev, SMS_LED_OFF); + sms_board_power(client->coredev, 0); + sms_board_dvb3_event(client, DVB3_EVENT_SLEEP); return 0; diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c index dfaa49a53f3..d1d652e7f89 100644 --- a/drivers/media/dvb/siano/smssdio.c +++ b/drivers/media/dvb/siano/smssdio.c @@ -46,6 +46,7 @@ #define SMSSDIO_DATA 0x00 #define SMSSDIO_INT 0x04 +#define SMSSDIO_BLOCK_SIZE 128 static const struct sdio_device_id smssdio_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), @@ -85,7 +86,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size) sdio_claim_host(smsdev->func); while (size >= smsdev->func->cur_blksize) { - ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1); + ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, + buffer, smsdev->func->cur_blksize); if (ret) goto out; @@ -94,8 +96,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size) } if (size) { - ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA, - buffer, size); + ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, 
+ buffer, size); } out: @@ -125,23 +127,23 @@ static void smssdio_interrupt(struct sdio_func *func) */ isr = sdio_readb(func, SMSSDIO_INT, &ret); if (ret) { - dev_err(&smsdev->func->dev, - "Unable to read interrupt register!\n"); + sms_err("Unable to read interrupt register!\n"); return; } if (smsdev->split_cb == NULL) { cb = smscore_getbuffer(smsdev->coredev); if (!cb) { - dev_err(&smsdev->func->dev, - "Unable to allocate data buffer!\n"); + sms_err("Unable to allocate data buffer!\n"); return; } - ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1); + ret = sdio_memcpy_fromio(smsdev->func, + cb->p, + SMSSDIO_DATA, + SMSSDIO_BLOCK_SIZE); if (ret) { - dev_err(&smsdev->func->dev, - "Error %d reading initial block!\n", ret); + sms_err("Error %d reading initial block!\n", ret); return; } @@ -152,7 +154,10 @@ static void smssdio_interrupt(struct sdio_func *func) return; } - size = hdr->msgLength - smsdev->func->cur_blksize; + if (hdr->msgLength > smsdev->func->cur_blksize) + size = hdr->msgLength - smsdev->func->cur_blksize; + else + size = 0; } else { cb = smsdev->split_cb; hdr = cb->p; @@ -162,23 +167,24 @@ static void smssdio_interrupt(struct sdio_func *func) smsdev->split_cb = NULL; } - if (hdr->msgLength > smsdev->func->cur_blksize) { + if (size) { void *buffer; - size = ALIGN(size, 128); - buffer = cb->p + hdr->msgLength; + buffer = cb->p + (hdr->msgLength - size); + size = ALIGN(size, SMSSDIO_BLOCK_SIZE); - BUG_ON(smsdev->func->cur_blksize != 128); + BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE); /* * First attempt to transfer all of it in one go... */ - ret = sdio_read_blocks(smsdev->func, buffer, - SMSSDIO_DATA, size / 128); + ret = sdio_memcpy_fromio(smsdev->func, + buffer, + SMSSDIO_DATA, + size); if (ret && ret != -EINVAL) { smscore_putbuffer(smsdev->coredev, cb); - dev_err(&smsdev->func->dev, - "Error %d reading data from card!\n", ret); + sms_err("Error %d reading data from card!\n", ret); return; } @@ -191,12 +197,12 @@ static void smssdio_interrupt(struct sdio_func *func) */ if (ret == -EINVAL) { while (size) { - ret = sdio_read_blocks(smsdev->func, - buffer, SMSSDIO_DATA, 1); + ret = sdio_memcpy_fromio(smsdev->func, + buffer, SMSSDIO_DATA, + smsdev->func->cur_blksize); if (ret) { smscore_putbuffer(smsdev->coredev, cb); - dev_err(&smsdev->func->dev, - "Error %d reading " + sms_err("Error %d reading " "data from card!\n", ret); return; } @@ -269,7 +275,7 @@ static int smssdio_probe(struct sdio_func *func, if (ret) goto release; - ret = sdio_set_block_size(func, 128); + ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE); if (ret) goto disable; diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig index 68eb4493f99..d8d4214fd65 100644 --- a/drivers/media/dvb/ttpci/Kconfig +++ b/drivers/media/dvb/ttpci/Kconfig @@ -1,5 +1,6 @@ config TTPCI_EEPROM tristate + depends on I2C default n config DVB_AV7110 diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index d1d959ed37b..8d65c652ba5 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -36,7 +36,6 @@ #include <linux/fs.h> #include <linux/timer.h> #include <linux/poll.h> -#include <linux/smp_lock.h> #include <linux/kernel.h> #include <linux/sched.h> diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index 837467f9380..575bf9d8941 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c @@ -58,6 +58,7 @@ #include <linux/module.h> #include <linux/init.h> #include 
<linux/slab.h> +#include <linux/smp_lock.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c index 640421ceb24..e85f318b4d2 100644 --- a/drivers/media/radio/radio-si470x.c +++ b/drivers/media/radio/radio-si470x.c @@ -127,6 +127,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> @@ -1200,7 +1201,7 @@ static int si470x_fops_release(struct file *file) video_unregister_device(radio->videodev); kfree(radio->buffer); kfree(radio); - goto done; + goto unlock; } /* stop rds reception */ @@ -1213,9 +1214,8 @@ static int si470x_fops_release(struct file *file) retval = si470x_stop(radio); usb_autopm_put_interface(radio->intf); } - +unlock: mutex_unlock(&radio->disconnect_lock); - done: return retval; } diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 061e147f6f2..dcf9fa9264b 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -312,6 +312,14 @@ config VIDEO_OV7670 OV7670 VGA camera. It currently only works with the M88ALP01 controller. +config VIDEO_MT9V011 + tristate "Micron mt9v011 sensor support" + depends on I2C && VIDEO_V4L2 + ---help--- + This is a Video4Linux2 sensor-level driver for the Micron + mt0v011 1.3 Mpixel camera. It currently only works with the + em28xx driver. + config VIDEO_TCM825X tristate "TCM825x camera sensor support" depends on I2C && VIDEO_V4L2 @@ -912,6 +920,8 @@ source "drivers/media/video/pwc/Kconfig" config USB_ZR364XX tristate "USB ZR364XX Camera support" depends on VIDEO_V4L2 + select VIDEOBUF_GEN + select VIDEOBUF_VMALLOC ---help--- Say Y here if you want to connect this type of camera to your computer's USB port. diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 7fb3add1b38..9f2e3214a48 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o obj-$(CONFIG_VIDEO_OV7670) += ov7670.o obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o +obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index fdb4adff3d2..ca6558c394b 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c @@ -3324,8 +3324,6 @@ void __devinit bttv_init_card1(struct bttv *btv) /* initialization part two -- after registering i2c bus */ void __devinit bttv_init_card2(struct bttv *btv) { - int addr=ADDR_UNSET; - btv->tuner_type = UNSET; if (BTTV_BOARD_UNKNOWN == btv->c.type) { @@ -3470,9 +3468,6 @@ void __devinit bttv_init_card2(struct bttv *btv) btv->pll.pll_current = -1; /* tuner configuration (from card list / autodetect / insmod option) */ - if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) - addr = bttv_tvcards[btv->c.type].tuner_addr; - if (UNSET != bttv_tvcards[btv->c.type].tuner_type) if (UNSET == btv->tuner_type) btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; @@ -3496,40 +3491,6 @@ void __devinit bttv_init_card2(struct bttv *btv) if (UNSET == btv->tuner_type) btv->tuner_type = TUNER_ABSENT; - if (btv->tuner_type != TUNER_ABSENT) { - struct tuner_setup tun_setup; - - /* Load tuner module before issuing tuner config call! 
*/ - if (bttv_tvcards[btv->c.type].has_radio) - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_RADIO)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); - - tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; - tun_setup.type = btv->tuner_type; - tun_setup.addr = addr; - - if (bttv_tvcards[btv->c.type].has_radio) - tun_setup.mode_mask |= T_RADIO; - - bttv_call_all(btv, tuner, s_type_addr, &tun_setup); - } - - if (btv->tda9887_conf) { - struct v4l2_priv_tun_config tda9887_cfg; - - tda9887_cfg.tuner = TUNER_TDA9887; - tda9887_cfg.priv = &btv->tda9887_conf; - - bttv_call_all(btv, tuner, s_config, &tda9887_cfg); - } - btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? @@ -3540,15 +3501,15 @@ void __devinit bttv_init_card2(struct bttv *btv) btv->has_remote = remote[btv->c.nr]; if (bttv_tvcards[btv->c.type].has_radio) - btv->has_radio=1; + btv->has_radio = 1; if (bttv_tvcards[btv->c.type].has_remote) - btv->has_remote=1; + btv->has_remote = 1; if (!bttv_tvcards[btv->c.type].no_gpioirq) - btv->gpioirq=1; + btv->gpioirq = 1; if (bttv_tvcards[btv->c.type].volume_gpio) - btv->volume_gpio=bttv_tvcards[btv->c.type].volume_gpio; + btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio; if (bttv_tvcards[btv->c.type].audio_mode_gpio) - btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio; + btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio; if (btv->tuner_type == TUNER_ABSENT) return; /* no tuner or related drivers to load */ @@ -3666,6 +3627,49 @@ no_audio: } +/* initialize the tuner */ +void __devinit bttv_init_tuner(struct bttv *btv) +{ + int addr = ADDR_UNSET; + + if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) + addr = bttv_tvcards[btv->c.type].tuner_addr; + + if (btv->tuner_type != TUNER_ABSENT) { + struct tuner_setup tun_setup; + + /* Load tuner module before issuing tuner config call! 
*/ + if (bttv_tvcards[btv->c.type].has_radio) + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); + + tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; + tun_setup.type = btv->tuner_type; + tun_setup.addr = addr; + + if (bttv_tvcards[btv->c.type].has_radio) + tun_setup.mode_mask |= T_RADIO; + + bttv_call_all(btv, tuner, s_type_addr, &tun_setup); + } + + if (btv->tda9887_conf) { + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &btv->tda9887_conf; + + bttv_call_all(btv, tuner, s_config, &tda9887_cfg); + } +} + /* ----------------------------------------------------------------------- */ static void modtec_eeprom(struct bttv *btv) diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 5eb1464af67..8cc6dd28d6a 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c @@ -41,6 +41,7 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> #include "bttvp.h" @@ -4418,6 +4419,7 @@ static int __devinit bttv_probe(struct pci_dev *dev, /* some card-specific stuff (needs working i2c) */ bttv_init_card2(btv); + bttv_init_tuner(btv); init_irqreg(btv); /* register video4linux + input */ diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h index 3d36daf206f..3ec2402c6b4 100644 --- a/drivers/media/video/bt8xx/bttv.h +++ b/drivers/media/video/bt8xx/bttv.h @@ -283,6 +283,7 @@ extern struct tvcard bttv_tvcards[]; extern void bttv_idcard(struct bttv *btv); extern void bttv_init_card1(struct bttv *btv); extern void bttv_init_card2(struct bttv *btv); +extern void bttv_init_tuner(struct bttv *btv); /* card-specific funtions */ extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c index 10dbd4a11b3..9e39bc5f7b0 100644 --- a/drivers/media/video/bw-qcam.c +++ b/drivers/media/video/bw-qcam.c @@ -992,7 +992,7 @@ static int accept_bwqcam(struct parport *port) if (parport[0] && strncmp(parport[0], "auto", 4) != 0) { /* user gave parport parameters */ - for(n=0; parport[n] && n<MAX_CAMS; n++){ + for (n = 0; n < MAX_CAMS && parport[n]; n++) { char *ep; unsigned long r; r = simple_strtoul(parport[n], &ep, 0); diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c index c92a25036f0..36f2d76006f 100644 --- a/drivers/media/video/cx18/cx18-cards.c +++ b/drivers/media/video/cx18/cx18-cards.c @@ -198,11 +198,14 @@ static const struct cx18_card_pci_info cx18_pci_mpc718[] = { static const struct cx18_card cx18_card_mpc718 = { .type = CX18_CARD_YUAN_MPC718, - .name = "Yuan MPC718", - .comment = "Analog video capture works; some audio line in may not.\n", + .name = "Yuan MPC718 MiniPCI DVB-T/Analog", + .comment = "Experimenters needed for device to work well.\n" + "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, - .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, + .hw_muxer = CX18_HW_GPIO_MUX, + .hw_all = CX18_HW_418_AV 
| CX18_HW_TUNER | + CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, @@ -211,27 +214,34 @@ static const struct cx18_card cx18_card_mpc718 = { { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, - { CX18_CARD_INPUT_COMPOSITE3, 2, CX18_AV_COMPOSITE3 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, - { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 }, - { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, 0 }, + { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, + { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, - .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, + /* FIXME - the FM radio is just a guess and driver doesn't use SIF */ + .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { - /* Probably Samsung K4D263238G-VC33 memory */ - .chip_config = 0x003, - .refresh = 0x30c, - .timing1 = 0x23230b73, - .timing2 = 0x08, + /* Hynix HY5DU283222B DDR RAM */ + .chip_config = 0x303, + .refresh = 0x3bd, + .timing1 = 0x36320966, + .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 2, }, + .gpio_init.initial_value = 0x1, + .gpio_init.direction = 0x3, + /* FIXME - these GPIO's are just guesses */ + .gpio_audio_input = { .mask = 0x3, + .tuner = 0x1, + .linein = 0x3, + .radio = 0x1 }, .xceive_pin = 0, .pci_list = cx18_pci_mpc718, .i2c = &cx18_i2c_std, diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c index 5136df19833..93f0dae0135 100644 --- a/drivers/media/video/cx18/cx18-controls.c +++ b/drivers/media/video/cx18/cx18-controls.c @@ -20,6 +20,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ +#include <linux/kernel.h> #include "cx18-driver.h" #include "cx18-cards.h" @@ -317,7 +318,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c) idx = p.audio_properties & 0x03; /* The audio clock of the digitizer must match the codec sample rate otherwise you get some very strange effects. 
*/ - if (idx < sizeof(freqs)) + if (idx < ARRAY_SIZE(freqs)) cx18_call_all(cx, audio, s_clock_freq, freqs[idx]); return err; } diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c index 6ea3fe623ef..51a0c33b25b 100644 --- a/drivers/media/video/cx18/cx18-dvb.c +++ b/drivers/media/video/cx18/cx18-dvb.c @@ -30,6 +30,10 @@ #include "s5h1409.h" #include "mxl5005s.h" #include "zl10353.h" + +#include <linux/firmware.h> +#include "mt352.h" +#include "mt352_priv.h" #include "tuner-xc2028.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); @@ -38,6 +42,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define CX18_CLOCK_ENABLE2 0xc71024 #define CX18_DMUX_CLK_MASK 0x0080 +/* + * CX18_CARD_HVR_1600_ESMT + * CX18_CARD_HVR_1600_SAMSUNG + */ + static struct mxl5005s_config hauppauge_hvr1600_tuner = { .i2c_address = 0xC6 >> 1, .if_freq = IF_FREQ_5380000HZ, @@ -65,6 +74,9 @@ static struct s5h1409_config hauppauge_hvr1600_config = { .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK }; +/* + * CX18_CARD_LEADTEK_DVR3100H + */ /* Information/confirmation of proper config values provided by Terry Wu */ static struct zl10353_config leadtek_dvr3100h_demod = { .demod_address = 0x1e >> 1, /* Datasheet suggested straps */ @@ -74,6 +86,121 @@ static struct zl10353_config leadtek_dvr3100h_demod = { .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */ }; +/* + * CX18_CARD_YUAN_MPC718 + */ +/* + * Due to + * + * 1. an absence of information on how to prgram the MT352 + * 2. the Linux mt352 module pushing MT352 initialzation off onto us here + * + * We have to use an init sequence that *you* must extract from the Windows + * driver (yuanrap.sys) and which we load as a firmware. + * + * If someone can provide me with a Zarlink MT352 (Intel CE6352?) Design Manual + * with chip programming details, then I can remove this annoyance. 
+ */ +static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream, + const struct firmware **fw) +{ + struct cx18 *cx = stream->cx; + const char *fn = "dvb-cx18-mpc718-mt352.fw"; + int ret; + + ret = request_firmware(fw, fn, &cx->pci_dev->dev); + if (ret) + CX18_ERR("Unable to open firmware file %s\n", fn); + else { + size_t sz = (*fw)->size; + if (sz < 2 || sz > 64 || (sz % 2) != 0) { + CX18_ERR("Firmware %s has a bad size: %lu bytes\n", + fn, (unsigned long) sz); + ret = -EILSEQ; + release_firmware(*fw); + *fw = NULL; + } + } + + if (ret) { + CX18_ERR("The MPC718 board variant with the MT352 DVB-T" + "demodualtor will not work without it\n"); + CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware " + "mpc718' if you need the firmware\n"); + } + return ret; +} + +static int yuan_mpc718_mt352_init(struct dvb_frontend *fe) +{ + struct cx18_dvb *dvb = container_of(fe->dvb, + struct cx18_dvb, dvb_adapter); + struct cx18_stream *stream = container_of(dvb, struct cx18_stream, dvb); + const struct firmware *fw = NULL; + int ret; + int i; + u8 buf[3]; + + ret = yuan_mpc718_mt352_reqfw(stream, &fw); + if (ret) + return ret; + + /* Loop through all the register-value pairs in the firmware file */ + for (i = 0; i < fw->size; i += 2) { + buf[0] = fw->data[i]; + /* Intercept a few registers we want to set ourselves */ + switch (buf[0]) { + case TRL_NOMINAL_RATE_0: + /* Set our custom OFDM bandwidth in the case below */ + break; + case TRL_NOMINAL_RATE_1: + /* 6 MHz: 64/7 * 6/8 / 20.48 * 2^16 = 0x55b6.db6 */ + /* 7 MHz: 64/7 * 7/8 / 20.48 * 2^16 = 0x6400 */ + /* 8 MHz: 64/7 * 8/8 / 20.48 * 2^16 = 0x7249.249 */ + buf[1] = 0x72; + buf[2] = 0x49; + mt352_write(fe, buf, 3); + break; + case INPUT_FREQ_0: + /* Set our custom IF in the case below */ + break; + case INPUT_FREQ_1: + /* 4.56 MHz IF: (20.48 - 4.56)/20.48 * 2^14 = 0x31c0 */ + buf[1] = 0x31; + buf[2] = 0xc0; + mt352_write(fe, buf, 3); + break; + default: + /* Pass through the register-value pair from the fw */ + buf[1] = fw->data[i+1]; + mt352_write(fe, buf, 2); + break; + } + } + + buf[0] = (u8) TUNER_GO; + buf[1] = 0x01; /* Go */ + mt352_write(fe, buf, 2); + release_firmware(fw); + return 0; +} + +static struct mt352_config yuan_mpc718_mt352_demod = { + .demod_address = 0x1e >> 1, + .adc_clock = 20480, /* 20.480 MHz */ + .if2 = 4560, /* 4.560 MHz */ + .no_tuner = 1, /* XC3028 is not behind the gate */ + .demod_init = yuan_mpc718_mt352_init, +}; + +static struct zl10353_config yuan_mpc718_zl10353_demod = { + .demod_address = 0x1e >> 1, /* Datasheet suggested straps */ + .if2 = 45600, /* 4.560 MHz IF from the XC3028 */ + .parallel_ts = 1, /* Not a serial TS */ + .no_tuner = 1, /* XC3028 is not behind the gate */ + .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */ +}; + static int dvb_register(struct cx18_stream *stream); /* Kernel DVB framework calls this when the feed needs to start. @@ -113,6 +240,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed) break; case CX18_CARD_LEADTEK_DVR3100H: + case CX18_CARD_YUAN_MPC718: default: /* Assumption - Parallel transport - Signalling * undefined or default. 
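Editorial note on the MPC718 MT352 init above: the values it substitutes into the firmware stream come straight from the arithmetic in its comments — the trellis nominal rate word is 64/7 * (bandwidth/8) / f_adc scaled by 2^16, and the input-frequency word is (f_adc - f_if) / f_adc scaled by 2^14, with the two result bytes then written back through mt352_write() in place of the firmware's values. The small standalone sketch below (plain C, illustrative helper names only) reproduces the 0x7249 and 0x31c0 constants used in that hunk.

/*
 * Hedged sketch: reproduces the MT352 constants computed in the comments
 * above. mt352_trl_word()/mt352_if_word() are illustrative helpers only.
 */
#include <stdio.h>
#include <stdint.h>

/* 64/7 * (bw_mhz/8) / adc_mhz * 2^16, truncated as in the comments */
static uint16_t mt352_trl_word(double bw_mhz, double adc_mhz)
{
	return (uint16_t)((64.0 / 7.0) * (bw_mhz / 8.0) / adc_mhz * 65536.0);
}

/* (adc - if) / adc * 2^14 */
static uint16_t mt352_if_word(double if_mhz, double adc_mhz)
{
	return (uint16_t)((adc_mhz - if_mhz) / adc_mhz * 16384.0);
}

int main(void)
{
	/* 8 MHz OFDM channel, 20.48 MHz ADC clock -> 0x7249 */
	printf("TRL_NOMINAL_RATE = 0x%04x\n", mt352_trl_word(8.0, 20.48));
	/* 4.56 MHz IF from the XC3028, 20.48 MHz ADC clock -> 0x31c0 */
	printf("INPUT_FREQ       = 0x%04x\n", mt352_if_word(4.56, 20.48));
	return 0;
}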
@@ -326,6 +454,38 @@ static int dvb_register(struct cx18_stream *stream) fe->ops.tuner_ops.set_config(fe, &ctrl); } break; + case CX18_CARD_YUAN_MPC718: + /* + * TODO + * Apparently, these cards also could instead have a + * DiBcom demod supported by one of the db7000 drivers + */ + dvb->fe = dvb_attach(mt352_attach, + &yuan_mpc718_mt352_demod, + &cx->i2c_adap[1]); + if (dvb->fe == NULL) + dvb->fe = dvb_attach(zl10353_attach, + &yuan_mpc718_zl10353_demod, + &cx->i2c_adap[1]); + if (dvb->fe != NULL) { + struct dvb_frontend *fe; + struct xc2028_config cfg = { + .i2c_adap = &cx->i2c_adap[1], + .i2c_addr = 0xc2 >> 1, + .ctrl = NULL, + }; + static struct xc2028_ctrl ctrl = { + .fname = XC2028_DEFAULT_FIRMWARE, + .max_len = 64, + .demod = XC3028_FE_ZARLINK456, + .type = XC2028_AUTO, + }; + + fe = dvb_attach(xc2028_attach, dvb->fe, &cfg); + if (fe != NULL && fe->ops.tuner_ops.set_config != NULL) + fe->ops.tuner_ops.set_config(fe, &ctrl); + } + break; default: /* No Digital Tv Support */ break; diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index 2943bfd32a9..1a1048b18f7 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> +#include <linux/smp_lock.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/cx2341x.h> @@ -57,7 +58,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages"); #define dprintk(level, fmt, arg...)\ do { if (v4l_debug >= level) \ - printk(KERN_DEBUG "%s: " fmt, dev->name , ## arg);\ + printk(KERN_DEBUG "%s: " fmt, \ + (dev) ? dev->name : "cx23885[?]", ## arg); \ } while (0) static struct cx23885_tvnorm cx23885_tvnorms[] = { @@ -1676,6 +1678,7 @@ static struct v4l2_file_operations mpeg_fops = { .read = mpeg_read, .poll = mpeg_poll, .mmap = mpeg_mmap, + .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { @@ -1712,6 +1715,8 @@ static struct video_device cx23885_mpeg_template = { .fops = &mpeg_fops, .ioctl_ops = &mpeg_ioctl_ops, .minor = -1, + .tvnorms = CX23885_NORMS, + .current_norm = V4L2_STD_NTSC_M, }; void cx23885_417_unregister(struct cx23885_dev *dev) diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c index 48a975134ac..86ac529e62b 100644 --- a/drivers/media/video/cx23885/cx23885-dvb.c +++ b/drivers/media/video/cx23885/cx23885-dvb.c @@ -463,6 +463,30 @@ static struct xc5000_config mygica_x8506_xc5000_config = { .if_khz = 5380, }; +static int cx23885_dvb_set_frontend(struct dvb_frontend *fe, + struct dvb_frontend_parameters *param) +{ + struct cx23885_tsport *port = fe->dvb->priv; + struct cx23885_dev *dev = port->dev; + + switch (dev->board) { + case CX23885_BOARD_HAUPPAUGE_HVR1275: + switch (param->u.vsb.modulation) { + case VSB_8: + cx23885_gpio_clear(dev, GPIO_5); + break; + case QAM_64: + case QAM_256: + default: + cx23885_gpio_set(dev, GPIO_5); + break; + } + break; + } + return (port->set_frontend_save) ? 
+ port->set_frontend_save(fe, param) : -ENODEV; +} + static int dvb_register(struct cx23885_tsport *port) { struct cx23885_dev *dev = port->dev; @@ -502,6 +526,12 @@ static int dvb_register(struct cx23885_tsport *port) 0x60, &dev->i2c_bus[1].i2c_adap, &hauppauge_hvr127x_config); } + + /* FIXME: temporary hack */ + /* define bridge override to set_frontend */ + port->set_frontend_save = fe0->dvb.frontend->ops.set_frontend; + fe0->dvb.frontend->ops.set_frontend = cx23885_dvb_set_frontend; + break; case CX23885_BOARD_HAUPPAUGE_HVR1255: i2c_bus = &dev->i2c_bus[0]; diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 70836af3ab4..5d609333630 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c @@ -26,6 +26,7 @@ #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kthread.h> diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h index 1a2ac518a3f..214a55e943b 100644 --- a/drivers/media/video/cx23885/cx23885.h +++ b/drivers/media/video/cx23885/cx23885.h @@ -288,6 +288,10 @@ struct cx23885_tsport { /* Allow a single tsport to have multiple frontends */ u32 num_frontends; void *port_priv; + + /* FIXME: temporary hack */ + int (*set_frontend_save) (struct dvb_frontend *, + struct dvb_frontend_parameters *); }; struct cx23885_dev { diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index 44eacfb0d0d..356d6896da3 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c @@ -32,6 +32,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> +#include <linux/smp_lock.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/cx2341x.h> diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index a5cc1c1fc2d..39465301ec9 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c @@ -3003,6 +3003,14 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl) case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: ctl->demod = XC3028_FE_OREN538; break; + case CX88_BOARD_GENIATECH_X8000_MT: + /* FIXME: For this board, the xc3028 never recovers after being + powered down (the reset GPIO probably is not set properly). 
+ We don't have access to the hardware so we cannot determine + which GPIO is used for xc3028, so just disable power xc3028 + power management for now */ + ctl->disable_power_mgmt = 1; + break; case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: case CX88_BOARD_PROLINK_PV_8000GT: diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c index c44e8760021..e237b507659 100644 --- a/drivers/media/video/cx88/cx88-dvb.c +++ b/drivers/media/video/cx88/cx88-dvb.c @@ -501,6 +501,7 @@ static struct zl10353_config cx88_pinnacle_hybrid_pctv = { static struct zl10353_config cx88_geniatech_x8000_mt = { .demod_address = (0x1e >> 1), .no_tuner = 1, + .disable_i2c_gate_ctrl = 1, }; static struct s5h1411_config dvico_fusionhdtv7_config = { diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c index da4e3912cd3..7172dcf2a4f 100644 --- a/drivers/media/video/cx88/cx88-mpeg.c +++ b/drivers/media/video/cx88/cx88-mpeg.c @@ -116,6 +116,10 @@ static int cx8802_start_dma(struct cx8802_dev *dev, udelay(100); break; case CX88_BOARD_HAUPPAUGE_HVR1300: + /* Enable MPEG parallel IO and video signal pins */ + cx_write(MO_PINMUX_IO, 0x88); + cx_write(TS_SOP_STAT, 0); + cx_write(TS_VALERR_CNTRL, 0); break; case CX88_BOARD_PINNACLE_PCTV_HD_800i: /* Enable MPEG parallel IO and video signal pins */ diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index b12770848c0..2bb54c3ef5c 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -31,6 +31,7 @@ #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c index ec2f45dde16..0664d111085 100644 --- a/drivers/media/video/dabusb.c +++ b/drivers/media/video/dabusb.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/atomic.h> diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig index 16a5af30e9d..6524b493e03 100644 --- a/drivers/media/video/em28xx/Kconfig +++ b/drivers/media/video/em28xx/Kconfig @@ -8,6 +8,8 @@ config VIDEO_EM28XX select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO + select VIDEO_MT9V011 if VIDEO_HELPER_CHIPS_AUTO + ---help--- This is a video4linux driver for Empia 28xx based TV cards. 
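Editorial note on the cx23885 set_frontend override a few hunks above: it uses a save-and-wrap idiom — the bridge keeps the demod's original ops.set_frontend pointer in set_frontend_save, installs its own hook, flips a GPIO according to the requested modulation, and then delegates. A reduced, hedged sketch of that idiom follows; the bar_* names and GPIO helpers are placeholders standing in for the cx23885-specific ones.

/*
 * Hedged sketch of the save-and-wrap frontend override used above.
 * bar_port and bar_gpio_set()/bar_gpio_clear() are illustrative only.
 */
#include "dvb_frontend.h"

struct bar_port {
	struct dvb_frontend *fe;
	/* original demod callback, saved before the wrapper is installed */
	int (*set_frontend_save)(struct dvb_frontend *,
				 struct dvb_frontend_parameters *);
};

static void bar_gpio_set(struct bar_port *port) { /* platform specific */ }
static void bar_gpio_clear(struct bar_port *port) { /* platform specific */ }

static int bar_set_frontend(struct dvb_frontend *fe,
			    struct dvb_frontend_parameters *param)
{
	struct bar_port *port = fe->dvb->priv;

	/* bridge-level work first: route the RF path for the modulation */
	if (param->u.vsb.modulation == VSB_8)
		bar_gpio_clear(port);
	else
		bar_gpio_set(port);

	/* then hand off to the demod's own implementation */
	return port->set_frontend_save ?
	       port->set_frontend_save(fe, param) : -ENODEV;
}

static void bar_install_override(struct bar_port *port)
{
	port->set_frontend_save = port->fe->ops.set_frontend;
	port->fe->ops.set_frontend = bar_set_frontend;
}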
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index c43fdb9bc88..1c2e544eda7 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -157,6 +157,20 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { { -1, -1, -1, -1}, }; +/* Pinnacle Hybrid Pro eb1a:2881 */ +static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { + {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, +}; + +static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { + {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10}, + {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */ + {EM2880_R04_GPO, 0x0c, 0xff, 1}, + { -1, -1, -1, -1}, +}; + + /* Callback for the most boards */ static struct em28xx_reg_seq default_tuner_gpio[] = { {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, @@ -191,18 +205,27 @@ static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = { {EM28XX_R08_GPIO, 0xff, 0xff, 10}, { -1, -1, -1, -1}, }; + +static struct em28xx_reg_seq silvercrest_reg_seq[] = { + {EM28XX_R08_GPIO, 0xff, 0xff, 10}, + {EM28XX_R08_GPIO, 0x01, 0xf7, 10}, + { -1, -1, -1, -1}, +}; + /* * Board definitions */ struct em28xx_board em28xx_boards[] = { [EM2750_BOARD_UNKNOWN] = { - .name = "Unknown EM2750/EM2751 webcam grabber", - .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .name = "EM2710/EM2750/EM2751 webcam grabber", + .xclk = EM28XX_XCLK_FREQUENCY_20MHZ, + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, + .gpio = silvercrest_reg_seq, } }, }, [EM2800_BOARD_UNKNOWN] = { @@ -224,13 +247,15 @@ struct em28xx_board em28xx_boards[] = { [EM2820_BOARD_UNKNOWN] = { .name = "Unknown EM2750/28xx video grabber", .tuner_type = TUNER_ABSENT, + .is_webcam = 1, /* To enable sensor probe */ }, [EM2750_BOARD_DLCW_130] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "Huaqi DLCW-130", .valid = EM28XX_BOARD_NOT_VALIDATED, .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -431,11 +456,23 @@ struct em28xx_board em28xx_boards[] = { [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { .name = "Videology 20K14XUSB USB2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, + .input = { { + .type = EM28XX_VMUX_COMPOSITE1, + .vmux = 0, + .amux = EM28XX_AMUX_VIDEO, + } }, + }, + [EM2820_BOARD_SILVERCREST_WEBCAM] = { + .name = "Silvercrest Webcam 1.3mpix", + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, + .gpio = silvercrest_reg_seq, } }, }, [EM2821_BOARD_SUPERCOMP_USB_2] = { @@ -479,7 +516,8 @@ struct em28xx_board em28xx_boards[] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "NetGMBH Cam", .valid = EM28XX_BOARD_NOT_VALIDATED, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -584,22 +622,27 @@ struct em28xx_board em28xx_boards[] = { }, [EM2861_BOARD_PLEXTOR_PX_TV100U] = { .name = "Plextor ConvertX PX-TV100U", - .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_TNF_5335MF, + .xclk = EM28XX_XCLK_I2S_MSB_TIMING | + EM28XX_XCLK_FREQUENCY_12MHZ, .tda9887_conf = 
TDA9887_PRESENT, .decoder = EM28XX_TVP5150, + .has_msp34xx = 1, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, + .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE1, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, + .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, + .gpio = pinnacle_hybrid_pro_analog, } }, }, @@ -826,7 +869,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, - .dvb_gpio = default_analog, + .dvb_gpio = default_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, @@ -1229,25 +1272,26 @@ struct em28xx_board em28xx_boards[] = { }, [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { .name = "Pinnacle Hybrid Pro", - .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, + .has_dvb = 1, + .dvb_gpio = pinnacle_hybrid_pro_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, - .gpio = default_analog, + .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE1, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, - .gpio = default_analog, + .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, - .gpio = default_analog, + .gpio = pinnacle_hybrid_pro_analog, } }, }, [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { @@ -1505,6 +1549,8 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2750_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2800), .driver_info = EM2800_BOARD_UNKNOWN }, + { USB_DEVICE(0xeb1a, 0x2710), + .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2820), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2821), @@ -1617,6 +1663,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = { {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, + {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028}, }; /* I2C devicelist hash table for devices with generic USB IDs */ @@ -1639,6 +1686,11 @@ static unsigned short tvp5150_addrs[] = { I2C_CLIENT_END }; +static unsigned short mt9v011_addrs[] = { + 0xba >> 1, + I2C_CLIENT_END +}; + static unsigned short msp3400_addrs[] = { 0x80 >> 1, 0x88 >> 1, @@ -1678,65 +1730,150 @@ static inline void em28xx_set_model(struct em28xx *dev) EM28XX_I2C_FREQ_100_KHZ; } -/* Since em28xx_pre_card_setup() requires a proper dev->model, - * this won't work for boards with generic PCI IDs + +/* FIXME: Should be replaced by a proper mt9m111 driver */ +static int em28xx_initialize_mt9m111(struct em28xx *dev) +{ + int i; + unsigned char regs[][3] = { + { 0x0d, 0x00, 0x01, }, /* reset and use defaults */ + { 0x0d, 0x00, 0x00, }, + { 0x0a, 0x00, 0x21, }, + { 0x21, 0x04, 0x00, }, /* full readout speed, no row/col skipping */ + }; + + for (i = 0; i < ARRAY_SIZE(regs); i++) + i2c_master_send(&dev->i2c_client, ®s[i][0], 3); + + return 0; +} + + +/* FIXME: Should be replaced by a proper mt9m001 driver */ +static int em28xx_initialize_mt9m001(struct em28xx *dev) +{ + int i; + unsigned char regs[][3] = { + { 0x0d, 0x00, 0x01, }, + { 0x0d, 0x00, 0x00, }, + { 0x04, 0x05, 0x00, }, /* hres = 1280 */ + { 0x03, 0x04, 0x00, }, /* vres = 1024 */ + { 0x20, 0x11, 0x00, }, + { 0x06, 0x00, 0x10, 
}, + { 0x2b, 0x00, 0x24, }, + { 0x2e, 0x00, 0x24, }, + { 0x35, 0x00, 0x24, }, + { 0x2d, 0x00, 0x20, }, + { 0x2c, 0x00, 0x20, }, + { 0x09, 0x0a, 0xd4, }, + { 0x35, 0x00, 0x57, }, + }; + + for (i = 0; i < ARRAY_SIZE(regs); i++) + i2c_master_send(&dev->i2c_client, ®s[i][0], 3); + + return 0; +} + +/* HINT method: webcam I2C chips + * + * This method works for webcams with Micron sensors */ -void em28xx_pre_card_setup(struct em28xx *dev) +static int em28xx_hint_sensor(struct em28xx *dev) { int rc; + char *sensor_name; + unsigned char cmd; + __be16 version_be; + u16 version; + + /* Micron sensor detection */ + dev->i2c_client.addr = 0xba >> 1; + cmd = 0; + i2c_master_send(&dev->i2c_client, &cmd, 1); + rc = i2c_master_recv(&dev->i2c_client, (char *)&version_be, 2); + if (rc != 2) + return -EINVAL; + + version = be16_to_cpu(version_be); + switch (version) { + case 0x8232: /* mt9v011 640x480 1.3 Mpix sensor */ + case 0x8243: /* mt9v011 rev B 640x480 1.3 Mpix sensor */ + dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; + em28xx_set_model(dev); + + sensor_name = "mt9v011"; + dev->em28xx_sensor = EM28XX_MT9V011; + dev->sensor_xres = 640; + dev->sensor_yres = 480; + /* + * FIXME: mt9v011 uses I2S speed as xtal clk - at least with + * the Silvercrest cam I have here for testing - for higher + * resolutions, a high clock cause horizontal artifacts, so we + * need to use a lower xclk frequency. + * Yet, it would be possible to adjust xclk depending on the + * desired resolution, since this affects directly the + * frame rate. + */ + dev->board.xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ; + dev->sensor_xtal = 4300000; - em28xx_set_model(dev); + /* probably means GRGB 16 bit bayer */ + dev->vinmode = 0x0d; + dev->vinctl = 0x00; - em28xx_info("Identified as %s (card=%d)\n", - dev->board.name, dev->model); + break; - /* Set the default GPO/GPIO for legacy devices */ - dev->reg_gpo_num = EM2880_R04_GPO; - dev->reg_gpio_num = EM28XX_R08_GPIO; + case 0x143a: /* MT9M111 as found in the ECS G200 */ + dev->model = EM2750_BOARD_UNKNOWN; + em28xx_set_model(dev); - dev->wait_after_write = 5; + sensor_name = "mt9m111"; + dev->board.xclk = EM28XX_XCLK_FREQUENCY_48MHZ; + dev->em28xx_sensor = EM28XX_MT9M111; + em28xx_initialize_mt9m111(dev); + dev->sensor_xres = 640; + dev->sensor_yres = 512; - /* Based on the Chip ID, set the device configuration */ - rc = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); - if (rc > 0) { - dev->chip_id = rc; + dev->vinmode = 0x0a; + dev->vinctl = 0x00; - switch (dev->chip_id) { - case CHIP_ID_EM2750: - em28xx_info("chip ID is em2750\n"); - break; - case CHIP_ID_EM2820: - em28xx_info("chip ID is em2820\n"); - break; - case CHIP_ID_EM2840: - em28xx_info("chip ID is em2840\n"); - break; - case CHIP_ID_EM2860: - em28xx_info("chip ID is em2860\n"); - break; - case CHIP_ID_EM2870: - em28xx_info("chip ID is em2870\n"); - dev->wait_after_write = 0; - break; - case CHIP_ID_EM2874: - em28xx_info("chip ID is em2874\n"); - dev->reg_gpio_num = EM2874_R80_GPIO; - dev->wait_after_write = 0; - break; - case CHIP_ID_EM2883: - em28xx_info("chip ID is em2882/em2883\n"); - dev->wait_after_write = 0; - break; - default: - em28xx_info("em28xx chip ID = %d\n", dev->chip_id); - } + break; + + case 0x8431: + dev->model = EM2750_BOARD_UNKNOWN; + em28xx_set_model(dev); + + sensor_name = "mt9m001"; + dev->em28xx_sensor = EM28XX_MT9M001; + em28xx_initialize_mt9m001(dev); + dev->sensor_xres = 1280; + dev->sensor_yres = 1024; + + /* probably means BGGR 16 bit bayer */ + dev->vinmode = 0x0c; + dev->vinctl = 0x00; + + break; + default: + 
printk("Unknown Micron Sensor 0x%04x\n", version); + return -EINVAL; } - /* Prepopulate cached GPO register content */ - rc = em28xx_read_reg(dev, dev->reg_gpo_num); - if (rc >= 0) - dev->reg_gpo = rc; + /* Setup webcam defaults */ + em28xx_pre_card_setup(dev); + + em28xx_errdev("Sensor is %s, using model %s entry.\n", + sensor_name, em28xx_boards[dev->model].name); + + return 0; +} +/* Since em28xx_pre_card_setup() requires a proper dev->model, + * this won't work for boards with generic PCI IDs + */ +void em28xx_pre_card_setup(struct em28xx *dev) +{ /* Set the initial XCLK and I2C clock values based on the board definition */ em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f); @@ -1746,9 +1883,8 @@ void em28xx_pre_card_setup(struct em28xx *dev) /* request some modules */ switch (dev->model) { case EM2861_BOARD_PLEXTOR_PX_TV100U: - /* FIXME guess */ - /* Turn on analog audio output */ - em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd); + /* Sets the msp34xx I2S speed */ + dev->i2s_speed = 2048000; break; case EM2861_BOARD_KWORLD_PVRTV_300U: case EM2880_BOARD_KWORLD_DVB_305U: @@ -1860,6 +1996,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_TERRATEC_HYBRID_XS: + case EM2881_BOARD_PINNACLE_HYBRID_PRO: ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: @@ -2085,7 +2222,20 @@ void em28xx_register_i2c_ir(struct em28xx *dev) void em28xx_card_setup(struct em28xx *dev) { - em28xx_set_model(dev); + /* + * If the device can be a webcam, seek for a sensor. + * If sensor is not found, then it isn't a webcam. + */ + if (dev->board.is_webcam) { + if (em28xx_hint_sensor(dev) < 0) + dev->board.is_webcam = 0; + else + dev->progressive = 1; + } else + em28xx_set_model(dev); + + em28xx_info("Identified as %s (card=%d)\n", + dev->board.name, dev->model); dev->tuner_type = em28xx_boards[dev->model].tuner_type; if (em28xx_boards[dev->model].tuner_addr) @@ -2156,6 +2306,7 @@ void em28xx_card_setup(struct em28xx *dev) em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, so make the call now so the analog GPIOs are set properly before probing the i2c bus. 
*/ + em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; } @@ -2189,6 +2340,15 @@ void em28xx_card_setup(struct em28xx *dev) v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tvp5150", "tvp5150", tvp5150_addrs); + if (dev->em28xx_sensor == EM28XX_MT9V011) { + struct v4l2_subdev *sd; + + sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + &dev->i2c_adap, "mt9v011", "mt9v011", mt9v011_addrs); + v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); + } + + if (dev->board.adecoder == EM28XX_TVAUDIO) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tvaudio", "tvaudio", dev->board.tvaudio_addr); @@ -2222,7 +2382,9 @@ void em28xx_card_setup(struct em28xx *dev) } em28xx_tuner_setup(dev); - em28xx_ir_init(dev); + + if(!disable_ir) + em28xx_ir_init(dev); } @@ -2288,7 +2450,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, int minor) { struct em28xx *dev = *devhandle; - int retval = -ENOMEM; + int retval; int errCode; dev->udev = udev; @@ -2305,6 +2467,58 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, dev->em28xx_read_reg_req = em28xx_read_reg_req; dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800; + em28xx_set_model(dev); + + /* Set the default GPO/GPIO for legacy devices */ + dev->reg_gpo_num = EM2880_R04_GPO; + dev->reg_gpio_num = EM28XX_R08_GPIO; + + dev->wait_after_write = 5; + + /* Based on the Chip ID, set the device configuration */ + retval = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); + if (retval > 0) { + dev->chip_id = retval; + + switch (dev->chip_id) { + case CHIP_ID_EM2710: + em28xx_info("chip ID is em2710\n"); + break; + case CHIP_ID_EM2750: + em28xx_info("chip ID is em2750\n"); + break; + case CHIP_ID_EM2820: + em28xx_info("chip ID is em2820 (or em2710)\n"); + break; + case CHIP_ID_EM2840: + em28xx_info("chip ID is em2840\n"); + break; + case CHIP_ID_EM2860: + em28xx_info("chip ID is em2860\n"); + break; + case CHIP_ID_EM2870: + em28xx_info("chip ID is em2870\n"); + dev->wait_after_write = 0; + break; + case CHIP_ID_EM2874: + em28xx_info("chip ID is em2874\n"); + dev->reg_gpio_num = EM2874_R80_GPIO; + dev->wait_after_write = 0; + break; + case CHIP_ID_EM2883: + em28xx_info("chip ID is em2882/em2883\n"); + dev->wait_after_write = 0; + break; + default: + em28xx_info("em28xx chip ID = %d\n", dev->chip_id); + } + } + + /* Prepopulate cached GPO register content */ + retval = em28xx_read_reg(dev, dev->reg_gpo_num); + if (retval >= 0) + dev->reg_gpo = retval; + em28xx_pre_card_setup(dev); if (!dev->board.is_em2800) { @@ -2333,6 +2547,12 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, return errCode; } + /* + * Default format, used for tvp5150 or saa711x output formats + */ + dev->vinmode = 0x10; + dev->vinctl = 0x11; + /* Do board specific init and eeprom reading */ em28xx_card_setup(dev); @@ -2573,6 +2793,7 @@ static int em28xx_usb_probe(struct usb_interface *interface, retval = em28xx_init_dev(&dev, udev, interface, nr); if (retval) { em28xx_devused &= ~(1<<dev->devno); + mutex_unlock(&dev->lock); kfree(dev); goto err; } diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c index c8d7ce8fbd3..98e140b5d95 100644 --- a/drivers/media/video/em28xx/em28xx-core.c +++ b/drivers/media/video/em28xx/em28xx-core.c @@ -632,6 +632,9 @@ int em28xx_capture_start(struct em28xx *dev, int start) return rc; } + if (dev->board.is_webcam) + rc = em28xx_write_reg(dev, 0x13, 0x0c); + /* enable video 
capture */ rc = em28xx_write_reg(dev, 0x48, 0x00); @@ -650,15 +653,15 @@ int em28xx_set_outfmt(struct em28xx *dev) int ret; ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, - dev->format->reg | 0x20, 0x3f); + dev->format->reg | 0x20, 0xff); if (ret < 0) - return ret; + return ret; - ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, 0x10); + ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode); if (ret < 0) return ret; - return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x11); + return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, dev->vinctl); } static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, @@ -695,13 +698,16 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v) { u8 mode; /* the em2800 scaler only supports scaling down to 50% */ - if (dev->board.is_em2800) + + if (dev->board.is_em2800) { mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); - else { + } else { u8 buf[2]; + buf[0] = h; buf[1] = h >> 8; em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2); + buf[0] = v; buf[1] = v >> 8; em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2); @@ -717,11 +723,17 @@ int em28xx_resolution_set(struct em28xx *dev) { int width, height; width = norm_maxw(dev); - height = norm_maxh(dev) >> 1; + height = norm_maxh(dev); + + if (!dev->progressive) + height >>= 1; em28xx_set_outfmt(dev); + + em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2); em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2); + return em28xx_scaler_set(dev, dev->hscale, dev->vscale); } diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index e7b47c8da8f..d603575431b 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c @@ -31,6 +31,8 @@ #include "lgdt330x.h" #include "zl10353.h" #include "s5h1409.h" +#include "mt352.h" +#include "mt352_priv.h" /* FIXME */ MODULE_DESCRIPTION("driver for em28xx based DVB cards"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); @@ -243,6 +245,14 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = { .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK }; +static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { + .demod_address = (0x1e >> 1), + .no_tuner = 1, + .disable_i2c_gate_ctrl = 1, + .parallel_ts = 1, + .if2 = 45600, +}; + #ifdef EM28XX_DRX397XD_SUPPORT /* [TODO] djh - not sure yet what the device config needs to contain */ static struct drx397xD_config em28xx_drx397xD_with_xc3028 = { @@ -250,6 +260,41 @@ static struct drx397xD_config em28xx_drx397xD_with_xc3028 = { }; #endif +static int mt352_terratec_xs_init(struct dvb_frontend *fe) +{ + /* Values extracted from a USB trace of the Terratec Windows driver */ + static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c }; + static u8 reset[] = { RESET, 0x80 }; + static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; + static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0xa0 }; + static u8 input_freq_cfg[] = { INPUT_FREQ_1, 0x31, 0xb8 }; + static u8 rs_err_cfg[] = { RS_ERR_PER_1, 0x00, 0x4d }; + static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; + static u8 trl_nom_cfg[] = { TRL_NOMINAL_RATE_1, 0x64, 0x00 }; + static u8 tps_given_cfg[] = { TPS_GIVEN_1, 0x40, 0x80, 0x50 }; + static u8 tuner_go[] = { TUNER_GO, 0x01}; + + mt352_write(fe, clock_config, sizeof(clock_config)); + udelay(200); + mt352_write(fe, reset, sizeof(reset)); + mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); + mt352_write(fe, agc_cfg, sizeof(agc_cfg)); + mt352_write(fe,
input_freq_cfg, sizeof(input_freq_cfg)); + mt352_write(fe, rs_err_cfg, sizeof(rs_err_cfg)); + mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); + mt352_write(fe, trl_nom_cfg, sizeof(trl_nom_cfg)); + mt352_write(fe, tps_given_cfg, sizeof(tps_given_cfg)); + mt352_write(fe, tuner_go, sizeof(tuner_go)); + return 0; +} + +static struct mt352_config terratec_xs_mt352_cfg = { + .demod_address = (0x1e >> 1), + .no_tuner = 1, + .if2 = 45600, + .demod_init = mt352_terratec_xs_init, +}; + /* ------------------------------------------------------------------ */ static int attach_xc3028(u8 addr, struct em28xx *dev) @@ -432,10 +477,7 @@ static int dvb_init(struct em28xx *dev) goto out_free; } break; - case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: - case EM2880_BOARD_TERRATEC_HYBRID_XS: case EM2880_BOARD_KWORLD_DVB_310U: - case EM2880_BOARD_EMPIRE_DUAL_TV: dvb->frontend = dvb_attach(zl10353_attach, &em28xx_zl10353_with_xc3028, &dev->i2c_adap); @@ -444,6 +486,34 @@ static int dvb_init(struct em28xx *dev) goto out_free; } break; + case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: + case EM2880_BOARD_EMPIRE_DUAL_TV: + dvb->frontend = dvb_attach(zl10353_attach, + &em28xx_zl10353_xc3028_no_i2c_gate, + &dev->i2c_adap); + if (attach_xc3028(0x61, dev) < 0) { + result = -EINVAL; + goto out_free; + } + break; + case EM2880_BOARD_TERRATEC_HYBRID_XS: + case EM2881_BOARD_PINNACLE_HYBRID_PRO: + dvb->frontend = dvb_attach(zl10353_attach, + &em28xx_zl10353_xc3028_no_i2c_gate, + &dev->i2c_adap); + if (dvb->frontend == NULL) { + /* This board could have either a zl10353 or a mt352. + If the chip id isn't for zl10353, try mt352 */ + dvb->frontend = dvb_attach(mt352_attach, + &terratec_xs_mt352_cfg, + &dev->i2c_adap); + } + + if (attach_xc3028(0x61, dev) < 0) { + result = -EINVAL; + goto out_free; + } + break; case EM2883_BOARD_KWORLD_HYBRID_330U: case EM2882_BOARD_EVGA_INDTUBE: dvb->frontend = dvb_attach(s5h1409_attach, diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c index 2c86fcf089f..27e33a287df 100644 --- a/drivers/media/video/em28xx/em28xx-i2c.c +++ b/drivers/media/video/em28xx/em28xx-i2c.c @@ -483,7 +483,7 @@ static char *i2c_devs[128] = { [0xa0 >> 1] = "eeprom", [0xb0 >> 1] = "tda9874", [0xb8 >> 1] = "tvp5150a", - [0xba >> 1] = "tvp5150a", + [0xba >> 1] = "webcam sensor or tvp5150a", [0xc0 >> 1] = "tuner (analog)", [0xc2 >> 1] = "tuner (analog)", [0xc4 >> 1] = "tuner (analog)", diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h index a2676d63cfd..6bf84bd787d 100644 --- a/drivers/media/video/em28xx/em28xx-reg.h +++ b/drivers/media/video/em28xx/em28xx-reg.h @@ -176,7 +176,8 @@ /* FIXME: Need to be populated with the other chip ID's */ enum em28xx_chip_id { - CHIP_ID_EM2820 = 18, /* Also used by em2710 */ + CHIP_ID_EM2710 = 17, + CHIP_ID_EM2820 = 18, /* Also used by some em2710 */ CHIP_ID_EM2840 = 20, CHIP_ID_EM2750 = 33, CHIP_ID_EM2860 = 34, diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 8fe1beecfff..ab079d9256c 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c @@ -90,10 +90,35 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); /* supported video standards */ static struct em28xx_fmt format[] = { { - .name = "16bpp YUY2, 4:2:2, packed", + .name = "16 bpp YUY2, 4:2:2, packed", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .reg = EM28XX_OUTFMT_YUV422_Y0UY1V, + }, { + .name = "16 bpp RGB 565, LE", + .fourcc = 
V4L2_PIX_FMT_RGB565, + .depth = 16, + .reg = EM28XX_OUTFMT_RGB_16_656, + }, { + .name = "8 bpp Bayer BGBG..GRGR", + .fourcc = V4L2_PIX_FMT_SBGGR8, + .depth = 8, + .reg = EM28XX_OUTFMT_RGB_8_BGBG, + }, { + .name = "8 bpp Bayer GRGR..BGBG", + .fourcc = V4L2_PIX_FMT_SGRBG8, + .depth = 8, + .reg = EM28XX_OUTFMT_RGB_8_GRGR, + }, { + .name = "8 bpp Bayer GBGB..RGRG", + .fourcc = V4L2_PIX_FMT_SGBRG8, + .depth = 8, + .reg = EM28XX_OUTFMT_RGB_8_GBGB, + }, { + .name = "12 bpp YUV411", + .fourcc = V4L2_PIX_FMT_YUV411P, + .depth = 12, + .reg = EM28XX_OUTFMT_YUV411, }, }; @@ -169,15 +194,24 @@ static void em28xx_copy_video(struct em28xx *dev, startread = p; remain = len; - /* Interlaces frame */ - if (buf->top_field) + if (dev->progressive) fieldstart = outp; - else - fieldstart = outp + bytesperline; + else { + /* Interlaces two half frames */ + if (buf->top_field) + fieldstart = outp; + else + fieldstart = outp + bytesperline; + } linesdone = dma_q->pos / bytesperline; currlinedone = dma_q->pos % bytesperline; - offset = linesdone * bytesperline * 2 + currlinedone; + + if (dev->progressive) + offset = linesdone * bytesperline + currlinedone; + else + offset = linesdone * bytesperline * 2 + currlinedone; + startwrite = fieldstart + offset; lencopy = bytesperline - currlinedone; lencopy = lencopy > remain ? remain : lencopy; @@ -351,7 +385,7 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb) em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2], len, (p[2] & 1) ? "odd" : "even"); - if (!(p[2] & 1)) { + if (dev->progressive || !(p[2] & 1)) { if (buf != NULL) buffer_filled(dev, dma_q, buf); get_next_buf(dma_q, &buf); @@ -632,8 +666,8 @@ static void get_scale(struct em28xx *dev, unsigned int width, unsigned int height, unsigned int *hscale, unsigned int *vscale) { - unsigned int maxw = norm_maxw(dev); - unsigned int maxh = norm_maxh(dev); + unsigned int maxw = norm_maxw(dev); + unsigned int maxh = norm_maxh(dev); *hscale = (((unsigned long)maxw) << 12) / width - 4096L; if (*hscale >= 0x4000) @@ -664,7 +698,10 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; /* FIXME: TOP? NONE? BOTTOM? ALTENATE? */ - f->fmt.pix.field = dev->interlaced ? + if (dev->progressive) + f->fmt.pix.field = V4L2_FIELD_NONE; + else + f->fmt.pix.field = dev->interlaced ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; mutex_unlock(&dev->lock); @@ -728,7 +765,33 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - f->fmt.pix.field = V4L2_FIELD_INTERLACED; + if (dev->progressive) + f->fmt.pix.field = V4L2_FIELD_NONE; + else + f->fmt.pix.field = dev->interlaced ? 
+ V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; + + return 0; +} + +static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc, + unsigned width, unsigned height) +{ + struct em28xx_fmt *fmt; + + fmt = format_by_fourcc(fourcc); + if (!fmt) + return -EINVAL; + + dev->format = fmt; + dev->width = width; + dev->height = height; + + /* set new image size */ + get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); + + em28xx_set_alternate(dev); + em28xx_resolution_set(dev); return 0; } @@ -739,7 +802,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; - struct em28xx_fmt *fmt; rc = check_dev(dev); if (rc < 0) @@ -749,12 +811,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, vidioc_try_fmt_vid_cap(file, priv, f); - fmt = format_by_fourcc(f->fmt.pix.pixelformat); - if (!fmt) { - rc = -EINVAL; - goto out; - } - if (videobuf_queue_is_busy(&fh->vb_vidq)) { em28xx_errdev("%s queue busy\n", __func__); rc = -EBUSY; @@ -767,16 +823,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, goto out; } - /* set new image size */ - dev->width = f->fmt.pix.width; - dev->height = f->fmt.pix.height; - dev->format = fmt; - get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); - - em28xx_set_alternate(dev); - em28xx_resolution_set(dev); - - rc = 0; + rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat, + f->fmt.pix.width, f->fmt.pix.height); out: mutex_unlock(&dev->lock); @@ -814,6 +862,41 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm) return 0; } +static int vidioc_g_parm(struct file *file, void *priv, + struct v4l2_streamparm *p) +{ + struct em28xx_fh *fh = priv; + struct em28xx *dev = fh->dev; + int rc = 0; + + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + if (dev->board.is_webcam) + rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0, + video, g_parm, p); + else + v4l2_video_std_frame_period(dev->norm, + &p->parm.capture.timeperframe); + + return rc; +} + +static int vidioc_s_parm(struct file *file, void *priv, + struct v4l2_streamparm *p) +{ + struct em28xx_fh *fh = priv; + struct em28xx *dev = fh->dev; + + if (!dev->board.is_webcam) + return -EINVAL; + + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p); +} + static const char *iname[] = { [EM28XX_VMUX_COMPOSITE1] = "Composite1", [EM28XX_VMUX_COMPOSITE2] = "Composite2", @@ -1592,6 +1675,7 @@ static int em28xx_v4l2_open(struct file *filp) struct em28xx *dev; enum v4l2_buf_type fh_type; struct em28xx_fh *fh; + enum v4l2_field field; dev = em28xx_get_device(minor, &fh_type, &radio); @@ -1616,11 +1700,6 @@ static int em28xx_v4l2_open(struct file *filp) filp->private_data = fh; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) { - dev->width = norm_maxw(dev); - dev->height = norm_maxh(dev); - dev->hscale = 0; - dev->vscale = 0; - em28xx_set_mode(dev, EM28XX_ANALOG_MODE); em28xx_set_alternate(dev); em28xx_resolution_set(dev); @@ -1638,8 +1717,13 @@ static int em28xx_v4l2_open(struct file *filp) dev->users++; + if (dev->progressive) + field = V4L2_FIELD_NONE; + else + field = V4L2_FIELD_INTERLACED; + videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops, - NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED, + NULL, &dev->slock, fh->type, field, sizeof(struct em28xx_buffer), fh); mutex_unlock(&dev->lock); @@ -1858,6 +1942,8 @@ static const struct 
v4l2_ioctl_ops video_ioctl_ops = { .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_s_std = vidioc_s_std, + .vidioc_g_parm = vidioc_g_parm, + .vidioc_s_parm = vidioc_s_parm, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, @@ -1962,15 +2048,14 @@ int em28xx_register_analog_devices(struct em28xx *dev) /* set default norm */ dev->norm = em28xx_video_template.current_norm; - dev->width = norm_maxw(dev); - dev->height = norm_maxh(dev); dev->interlaced = EM28XX_INTERLACED_DEFAULT; - dev->hscale = 0; - dev->vscale = 0; dev->ctl_input = 0; /* Analog specific initialization */ dev->format = &format[0]; + em28xx_set_video_format(dev, format[0].fourcc, + norm_maxw(dev), norm_maxh(dev)); + video_mux(dev, dev->ctl_input); /* Audio defaults */ diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h index 813ce45c2f9..a2add61f7d5 100644 --- a/drivers/media/video/em28xx/em28xx.h +++ b/drivers/media/video/em28xx/em28xx.h @@ -107,6 +107,7 @@ #define EM2860_BOARD_TERRATEC_AV350 68 #define EM2882_BOARD_KWORLD_ATSC_315U 69 #define EM2882_BOARD_EVGA_INDTUBE 70 +#define EM2820_BOARD_SILVERCREST_WEBCAM 71 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 @@ -357,11 +358,18 @@ struct em28xx_input { #define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) enum em28xx_decoder { - EM28XX_NODECODER, + EM28XX_NODECODER = 0, EM28XX_TVP5150, EM28XX_SAA711X, }; +enum em28xx_sensor { + EM28XX_NOSENSOR = 0, + EM28XX_MT9V011, + EM28XX_MT9M001, + EM28XX_MT9M111, +}; + enum em28xx_adecoder { EM28XX_NOADECODER = 0, EM28XX_TVAUDIO, @@ -388,6 +396,7 @@ struct em28xx_board { unsigned int max_range_640_480:1; unsigned int has_dvb:1; unsigned int has_snapshot_button:1; + unsigned int is_webcam:1; unsigned int valid:1; unsigned char xclk, i2c_speed; @@ -471,6 +480,17 @@ struct em28xx { struct v4l2_device v4l2_dev; struct em28xx_board board; + /* Webcam specific fields */ + enum em28xx_sensor em28xx_sensor; + int sensor_xres, sensor_yres; + int sensor_xtal; + + /* Allows progressive (e. g. non-interlaced) mode */ + int progressive; + + /* Vinmode/Vinctl used at the driver */ + int vinmode, vinctl; + unsigned int stream_on:1; /* Locks streams */ unsigned int has_audio_class:1; unsigned int has_alsa_audio:1; @@ -751,17 +771,23 @@ static inline int em28xx_gamma_set(struct em28xx *dev, s32 val) /*FIXME: maxw should be dependent of alt mode */ static inline unsigned int norm_maxw(struct em28xx *dev) { + if (dev->board.is_webcam) + return dev->sensor_xres; + if (dev->board.max_range_640_480) return 640; - else - return 720; + + return 720; } static inline unsigned int norm_maxh(struct em28xx *dev) { + if (dev->board.is_webcam) + return dev->sensor_yres; + if (dev->board.max_range_640_480) return 480; - else - return (dev->norm & V4L2_STD_625_50) ? 576 : 480; + + return (dev->norm & V4L2_STD_625_50) ? 576 : 480; } #endif diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig index 578dc4ffc96..e994dcac43f 100644 --- a/drivers/media/video/gspca/Kconfig +++ b/drivers/media/video/gspca/Kconfig @@ -102,6 +102,22 @@ config USB_GSPCA_PAC7311 To compile this driver as a module, choose M here: the module will be called gspca_pac7311. +config USB_GSPCA_SN9C20X + tristate "SN9C20X USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA + help + Say Y here if you want support for cameras based on the + sn9c20x chips (SN9C201 and SN9C202). 
+ + To compile this driver as a module, choose M here: the + module will be called gspca_sn9c20x. + +config USB_GSPCA_SN9C20X_EVDEV + bool "Enable evdev support" + depends on USB_GSPCA_SN9C20X && INPUT + ---help--- + Say Y here in order to enable evdev support for sn9c20x webcam button. + config USB_GSPCA_SONIXB tristate "SONIX Bayer USB Camera Driver" depends on VIDEO_V4L2 && USB_GSPCA diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile index 8a6643e8eb9..f6d3b86e9ad 100644 --- a/drivers/media/video/gspca/Makefile +++ b/drivers/media/video/gspca/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o +obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o @@ -35,6 +36,7 @@ gspca_ov519-objs := ov519.o gspca_ov534-objs := ov534.o gspca_pac207-objs := pac207.o gspca_pac7311-objs := pac7311.o +gspca_sn9c20x-objs := sn9c20x.o gspca_sonixb-objs := sonixb.o gspca_sonixj-objs := sonixj.o gspca_spca500-objs := spca500.o diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c index 219cfa6fb87..8d48ea1742c 100644 --- a/drivers/media/video/gspca/conex.c +++ b/drivers/media/video/gspca/conex.c @@ -846,6 +846,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 1e89600986c..b8561dfb6c8 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c @@ -727,6 +727,74 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev, return -EINVAL; } +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int vidioc_g_register(struct file *file, void *priv, + struct v4l2_dbg_register *reg) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (!gspca_dev->sd_desc->get_register) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = gspca_dev->sd_desc->get_register(gspca_dev, reg); + else + ret = -ENODEV; + mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} + +static int vidioc_s_register(struct file *file, void *priv, + struct v4l2_dbg_register *reg) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (!gspca_dev->sd_desc->set_register) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = gspca_dev->sd_desc->set_register(gspca_dev, reg); + else + ret = -ENODEV; + mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} +#endif + +static int vidioc_g_chip_ident(struct file *file, void *priv, + struct v4l2_dbg_chip_ident *chip) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip); + else + ret = -ENODEV; + 
mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} + static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmtdesc) { @@ -1883,6 +1951,11 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = { .vidioc_s_parm = vidioc_s_parm, .vidioc_s_std = vidioc_s_std, .vidioc_enum_framesizes = vidioc_enum_framesizes, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .vidioc_g_register = vidioc_g_register, + .vidioc_s_register = vidioc_s_register, +#endif + .vidioc_g_chip_ident = vidioc_g_chip_ident, #ifdef CONFIG_VIDEO_V4L1_COMPAT .vidiocgmbuf = vidiocgmbuf, #endif diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h index bd1faff8864..46c4effdfcd 100644 --- a/drivers/media/video/gspca/gspca.h +++ b/drivers/media/video/gspca/gspca.h @@ -69,6 +69,10 @@ typedef void (*cam_v_op) (struct gspca_dev *); typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); typedef int (*cam_jpg_op) (struct gspca_dev *, struct v4l2_jpegcompression *); +typedef int (*cam_reg_op) (struct gspca_dev *, + struct v4l2_dbg_register *); +typedef int (*cam_ident_op) (struct gspca_dev *, + struct v4l2_dbg_chip_ident *); typedef int (*cam_streamparm_op) (struct gspca_dev *, struct v4l2_streamparm *); typedef int (*cam_qmnu_op) (struct gspca_dev *, @@ -105,6 +109,11 @@ struct sd_desc { cam_qmnu_op querymenu; cam_streamparm_op get_streamparm; cam_streamparm_op set_streamparm; +#ifdef CONFIG_VIDEO_ADV_DEBUG + cam_reg_op set_register; + cam_reg_op get_register; +#endif + cam_ident_op get_chip_ident; }; /* packet types when moving from iso buf to frame buf */ diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c index 191bcd71897..0163903d1c0 100644 --- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c +++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c @@ -476,9 +476,6 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val) err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); if (err < 0) return err; - err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); - if (err < 0) - return err; err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); if (err < 0) @@ -524,9 +521,6 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val) err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); if (err < 0) return err; - err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); - if (err < 0) - return err; err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); if (err < 0) diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c index 75e8d14e4ac..de769caf013 100644 --- a/drivers/media/video/gspca/mars.c +++ b/drivers/media/video/gspca/mars.c @@ -201,6 +201,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c new file mode 100644 index 00000000000..fcfbbd329b4 --- /dev/null +++ b/drivers/media/video/gspca/sn9c20x.c @@ -0,0 +1,2434 @@ +/* + * Sonix sn9c201 sn9c202 library + * Copyright (C) 2008-2009 microdia project <microdia@googlegroups.com> + * Copyright (C) 2009 Brian Johnson <brijohn@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as 
published by + * the Free Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +#include <linux/kthread.h> +#include <linux/freezer.h> +#include <linux/usb/input.h> +#include <linux/input.h> +#endif + +#include "gspca.h" +#include "jpeg.h" + +#include <media/v4l2-chip-ident.h> + +MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, " + "microdia project <microdia@googlegroups.com>"); +MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver"); +MODULE_LICENSE("GPL"); + +#define MODULE_NAME "sn9c20x" + +#define MODE_RAW 0x10 +#define MODE_JPEG 0x20 +#define MODE_SXGA 0x80 + +#define SENSOR_OV9650 0 +#define SENSOR_OV9655 1 +#define SENSOR_SOI968 2 +#define SENSOR_OV7660 3 +#define SENSOR_OV7670 4 +#define SENSOR_MT9V011 5 +#define SENSOR_MT9V111 6 +#define SENSOR_MT9V112 7 +#define SENSOR_MT9M001 8 +#define SENSOR_MT9M111 9 +#define SENSOR_HV7131R 10 +#define SENSOR_MT9VPRB 20 + +/* specific webcam descriptor */ +struct sd { + struct gspca_dev gspca_dev; + +#define MIN_AVG_LUM 80 +#define MAX_AVG_LUM 130 + atomic_t avg_lum; + u8 old_step; + u8 older_step; + u8 exposure_step; + + u8 brightness; + u8 contrast; + u8 saturation; + s16 hue; + u8 gamma; + u8 red; + u8 blue; + + u8 hflip; + u8 vflip; + u8 gain; + u16 exposure; + u8 auto_exposure; + + u8 i2c_addr; + u8 sensor; + u8 hstart; + u8 vstart; + + u8 *jpeg_hdr; + u8 quality; + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + struct input_dev *input_dev; + u8 input_gpio; + struct task_struct *input_task; +#endif +}; + +static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val); +static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val); +static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val); +static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val); +static int sd_sethue(struct gspca_dev *gspca_dev, s32 val); +static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val); +static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val); +static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val); +static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val); +static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val); +static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val); +static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setgain(struct gspca_dev *gspca_dev, s32 val); +static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val); +static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val); +static int 
sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val); + +static struct ctrl sd_ctrls[] = { + { +#define BRIGHTNESS_IDX 0 + { + .id = V4L2_CID_BRIGHTNESS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Brightness", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define BRIGHTNESS_DEFAULT 0x7f + .default_value = BRIGHTNESS_DEFAULT, + }, + .set = sd_setbrightness, + .get = sd_getbrightness, + }, + { +#define CONTRAST_IDX 1 + { + .id = V4L2_CID_CONTRAST, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Contrast", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define CONTRAST_DEFAULT 0x7f + .default_value = CONTRAST_DEFAULT, + }, + .set = sd_setcontrast, + .get = sd_getcontrast, + }, + { +#define SATURATION_IDX 2 + { + .id = V4L2_CID_SATURATION, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Saturation", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define SATURATION_DEFAULT 0x7f + .default_value = SATURATION_DEFAULT, + }, + .set = sd_setsaturation, + .get = sd_getsaturation, + }, + { +#define HUE_IDX 3 + { + .id = V4L2_CID_HUE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Hue", + .minimum = -180, + .maximum = 180, + .step = 1, +#define HUE_DEFAULT 0 + .default_value = HUE_DEFAULT, + }, + .set = sd_sethue, + .get = sd_gethue, + }, + { +#define GAMMA_IDX 4 + { + .id = V4L2_CID_GAMMA, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Gamma", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define GAMMA_DEFAULT 0x10 + .default_value = GAMMA_DEFAULT, + }, + .set = sd_setgamma, + .get = sd_getgamma, + }, + { +#define BLUE_IDX 5 + { + .id = V4L2_CID_BLUE_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Blue Balance", + .minimum = 0, + .maximum = 0x7f, + .step = 1, +#define BLUE_DEFAULT 0x28 + .default_value = BLUE_DEFAULT, + }, + .set = sd_setbluebalance, + .get = sd_getbluebalance, + }, + { +#define RED_IDX 6 + { + .id = V4L2_CID_RED_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Red Balance", + .minimum = 0, + .maximum = 0x7f, + .step = 1, +#define RED_DEFAULT 0x28 + .default_value = RED_DEFAULT, + }, + .set = sd_setredbalance, + .get = sd_getredbalance, + }, + { +#define HFLIP_IDX 7 + { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Horizontal Flip", + .minimum = 0, + .maximum = 1, + .step = 1, +#define HFLIP_DEFAULT 0 + .default_value = HFLIP_DEFAULT, + }, + .set = sd_sethflip, + .get = sd_gethflip, + }, + { +#define VFLIP_IDX 8 + { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Vertical Flip", + .minimum = 0, + .maximum = 1, + .step = 1, +#define VFLIP_DEFAULT 0 + .default_value = VFLIP_DEFAULT, + }, + .set = sd_setvflip, + .get = sd_getvflip, + }, + { +#define EXPOSURE_IDX 9 + { + .id = V4L2_CID_EXPOSURE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Exposure", + .minimum = 0, + .maximum = 0x1780, + .step = 1, +#define EXPOSURE_DEFAULT 0x33 + .default_value = EXPOSURE_DEFAULT, + }, + .set = sd_setexposure, + .get = sd_getexposure, + }, + { +#define GAIN_IDX 10 + { + .id = V4L2_CID_GAIN, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Gain", + .minimum = 0, + .maximum = 28, + .step = 1, +#define GAIN_DEFAULT 0x00 + .default_value = GAIN_DEFAULT, + }, + .set = sd_setgain, + .get = sd_getgain, + }, + { +#define AUTOGAIN_IDX 11 + { + .id = V4L2_CID_AUTOGAIN, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Auto Exposure", + .minimum = 0, + .maximum = 1, + .step = 1, +#define AUTO_EXPOSURE_DEFAULT 1 + .default_value = AUTO_EXPOSURE_DEFAULT, + }, + .set = sd_setautoexposure, + .get = sd_getautoexposure, + }, +}; + +static const struct v4l2_pix_format 
vga_mode[] = { + {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 0 | MODE_JPEG}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0 | MODE_RAW}, + {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0}, + {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 1 | MODE_JPEG}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1 | MODE_RAW}, + {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, + {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 2 | MODE_JPEG}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2 | MODE_RAW}, + {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2}, +}; + +static const struct v4l2_pix_format sxga_mode[] = { + {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 0 | MODE_JPEG}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0 | MODE_RAW}, + {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0}, + {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 1 | MODE_JPEG}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1 | MODE_RAW}, + {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, + {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 2 | MODE_JPEG}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2 | MODE_RAW}, + {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2}, + {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 1280, + .sizeimage = (1280 * 1024) + 64, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 3 | MODE_RAW | MODE_SXGA}, +}; + +static const int hsv_red_x[] = { + 41, 44, 46, 48, 50, 52, 54, 56, + 58, 60, 62, 64, 66, 68, 70, 72, + 74, 76, 78, 80, 81, 83, 85, 87, + 88, 90, 92, 93, 95, 97, 98, 100, + 101, 102, 104, 105, 107, 108, 109, 110, + 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 123, 124, 125, 125, + 126, 127, 127, 128, 128, 129, 129, 129, + 130, 130, 130, 130, 131, 131, 131, 131, + 
131, 131, 131, 131, 130, 130, 130, 130, + 129, 129, 129, 128, 128, 127, 127, 126, + 125, 125, 124, 123, 122, 122, 121, 120, + 119, 118, 117, 116, 115, 114, 112, 111, + 110, 109, 107, 106, 105, 103, 102, 101, + 99, 98, 96, 94, 93, 91, 90, 88, + 86, 84, 83, 81, 79, 77, 75, 74, + 72, 70, 68, 66, 64, 62, 60, 58, + 56, 54, 52, 49, 47, 45, 43, 41, + 39, 36, 34, 32, 30, 28, 25, 23, + 21, 19, 16, 14, 12, 9, 7, 5, + 3, 0, -1, -3, -6, -8, -10, -12, + -15, -17, -19, -22, -24, -26, -28, -30, + -33, -35, -37, -39, -41, -44, -46, -48, + -50, -52, -54, -56, -58, -60, -62, -64, + -66, -68, -70, -72, -74, -76, -78, -80, + -81, -83, -85, -87, -88, -90, -92, -93, + -95, -97, -98, -100, -101, -102, -104, -105, + -107, -108, -109, -110, -112, -113, -114, -115, + -116, -117, -118, -119, -120, -121, -122, -123, + -123, -124, -125, -125, -126, -127, -127, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -127, -127, -126, -125, -125, -124, -123, + -122, -122, -121, -120, -119, -118, -117, -116, + -115, -114, -112, -111, -110, -109, -107, -106, + -105, -103, -102, -101, -99, -98, -96, -94, + -93, -91, -90, -88, -86, -84, -83, -81, + -79, -77, -75, -74, -72, -70, -68, -66, + -64, -62, -60, -58, -56, -54, -52, -49, + -47, -45, -43, -41, -39, -36, -34, -32, + -30, -28, -25, -23, -21, -19, -16, -14, + -12, -9, -7, -5, -3, 0, 1, 3, + 6, 8, 10, 12, 15, 17, 19, 22, + 24, 26, 28, 30, 33, 35, 37, 39, 41 +}; + +static const int hsv_red_y[] = { + 82, 80, 78, 76, 74, 73, 71, 69, + 67, 65, 63, 61, 58, 56, 54, 52, + 50, 48, 46, 44, 41, 39, 37, 35, + 32, 30, 28, 26, 23, 21, 19, 16, + 14, 12, 10, 7, 5, 3, 0, -1, + -3, -6, -8, -10, -13, -15, -17, -19, + -22, -24, -26, -29, -31, -33, -35, -38, + -40, -42, -44, -46, -48, -51, -53, -55, + -57, -59, -61, -63, -65, -67, -69, -71, + -73, -75, -77, -79, -81, -82, -84, -86, + -88, -89, -91, -93, -94, -96, -98, -99, + -101, -102, -104, -105, -106, -108, -109, -110, + -112, -113, -114, -115, -116, -117, -119, -120, + -120, -121, -122, -123, -124, -125, -126, -126, + -127, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -127, -127, -126, -125, -125, -124, -123, -122, + -121, -120, -119, -118, -117, -116, -115, -114, + -113, -111, -110, -109, -107, -106, -105, -103, + -102, -100, -99, -97, -96, -94, -92, -91, + -89, -87, -85, -84, -82, -80, -78, -76, + -74, -73, -71, -69, -67, -65, -63, -61, + -58, -56, -54, -52, -50, -48, -46, -44, + -41, -39, -37, -35, -32, -30, -28, -26, + -23, -21, -19, -16, -14, -12, -10, -7, + -5, -3, 0, 1, 3, 6, 8, 10, + 13, 15, 17, 19, 22, 24, 26, 29, + 31, 33, 35, 38, 40, 42, 44, 46, + 48, 51, 53, 55, 57, 59, 61, 63, + 65, 67, 69, 71, 73, 75, 77, 79, + 81, 82, 84, 86, 88, 89, 91, 93, + 94, 96, 98, 99, 101, 102, 104, 105, + 106, 108, 109, 110, 112, 113, 114, 115, + 116, 117, 119, 120, 120, 121, 122, 123, + 124, 125, 126, 126, 127, 128, 128, 129, + 129, 130, 130, 131, 131, 131, 131, 132, + 132, 132, 132, 132, 132, 132, 132, 132, + 132, 132, 132, 131, 131, 131, 130, 130, + 130, 129, 129, 128, 127, 127, 126, 125, + 125, 124, 123, 122, 121, 120, 119, 118, + 117, 116, 115, 114, 113, 111, 110, 109, + 107, 106, 105, 103, 102, 100, 99, 97, + 96, 94, 92, 91, 89, 87, 85, 84, 82 +}; + +static const int hsv_green_x[] = { + -124, -124, -125, -125, -125, -125, -125, -125, + -125, -126, -126, -125, -125, -125, -125, -125, + 
-125, -124, -124, -124, -123, -123, -122, -122, + -121, -121, -120, -120, -119, -118, -117, -117, + -116, -115, -114, -113, -112, -111, -110, -109, + -108, -107, -105, -104, -103, -102, -100, -99, + -98, -96, -95, -93, -92, -91, -89, -87, + -86, -84, -83, -81, -79, -77, -76, -74, + -72, -70, -69, -67, -65, -63, -61, -59, + -57, -55, -53, -51, -49, -47, -45, -43, + -41, -39, -37, -35, -33, -30, -28, -26, + -24, -22, -20, -18, -15, -13, -11, -9, + -7, -4, -2, 0, 1, 3, 6, 8, + 10, 12, 14, 17, 19, 21, 23, 25, + 27, 29, 32, 34, 36, 38, 40, 42, + 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 66, 68, 70, 71, 73, + 75, 77, 78, 80, 82, 83, 85, 87, + 88, 90, 91, 93, 94, 96, 97, 98, + 100, 101, 102, 104, 105, 106, 107, 108, + 109, 111, 112, 113, 113, 114, 115, 116, + 117, 118, 118, 119, 120, 120, 121, 122, + 122, 123, 123, 124, 124, 124, 125, 125, + 125, 125, 125, 125, 125, 126, 126, 125, + 125, 125, 125, 125, 125, 124, 124, 124, + 123, 123, 122, 122, 121, 121, 120, 120, + 119, 118, 117, 117, 116, 115, 114, 113, + 112, 111, 110, 109, 108, 107, 105, 104, + 103, 102, 100, 99, 98, 96, 95, 93, + 92, 91, 89, 87, 86, 84, 83, 81, + 79, 77, 76, 74, 72, 70, 69, 67, + 65, 63, 61, 59, 57, 55, 53, 51, + 49, 47, 45, 43, 41, 39, 37, 35, + 33, 30, 28, 26, 24, 22, 20, 18, + 15, 13, 11, 9, 7, 4, 2, 0, + -1, -3, -6, -8, -10, -12, -14, -17, + -19, -21, -23, -25, -27, -29, -32, -34, + -36, -38, -40, -42, -44, -46, -48, -50, + -52, -54, -56, -58, -60, -62, -64, -66, + -68, -70, -71, -73, -75, -77, -78, -80, + -82, -83, -85, -87, -88, -90, -91, -93, + -94, -96, -97, -98, -100, -101, -102, -104, + -105, -106, -107, -108, -109, -111, -112, -113, + -113, -114, -115, -116, -117, -118, -118, -119, + -120, -120, -121, -122, -122, -123, -123, -124, -124 +}; + +static const int hsv_green_y[] = { + -100, -99, -98, -97, -95, -94, -93, -91, + -90, -89, -87, -86, -84, -83, -81, -80, + -78, -76, -75, -73, -71, -70, -68, -66, + -64, -63, -61, -59, -57, -55, -53, -51, + -49, -48, -46, -44, -42, -40, -38, -36, + -34, -32, -30, -27, -25, -23, -21, -19, + -17, -15, -13, -11, -9, -7, -4, -2, + 0, 1, 3, 5, 7, 9, 11, 14, + 16, 18, 20, 22, 24, 26, 28, 30, + 32, 34, 36, 38, 40, 42, 44, 46, + 48, 50, 52, 54, 56, 58, 59, 61, + 63, 65, 67, 68, 70, 72, 74, 75, + 77, 78, 80, 82, 83, 85, 86, 88, + 89, 90, 92, 93, 95, 96, 97, 98, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 112, 113, 114, + 115, 115, 116, 116, 117, 117, 118, 118, + 119, 119, 119, 120, 120, 120, 120, 120, + 121, 121, 121, 121, 121, 121, 120, 120, + 120, 120, 120, 119, 119, 119, 118, 118, + 117, 117, 116, 116, 115, 114, 114, 113, + 112, 111, 111, 110, 109, 108, 107, 106, + 105, 104, 103, 102, 100, 99, 98, 97, + 95, 94, 93, 91, 90, 89, 87, 86, + 84, 83, 81, 80, 78, 76, 75, 73, + 71, 70, 68, 66, 64, 63, 61, 59, + 57, 55, 53, 51, 49, 48, 46, 44, + 42, 40, 38, 36, 34, 32, 30, 27, + 25, 23, 21, 19, 17, 15, 13, 11, + 9, 7, 4, 2, 0, -1, -3, -5, + -7, -9, -11, -14, -16, -18, -20, -22, + -24, -26, -28, -30, -32, -34, -36, -38, + -40, -42, -44, -46, -48, -50, -52, -54, + -56, -58, -59, -61, -63, -65, -67, -68, + -70, -72, -74, -75, -77, -78, -80, -82, + -83, -85, -86, -88, -89, -90, -92, -93, + -95, -96, -97, -98, -100, -101, -102, -103, + -104, -105, -106, -107, -108, -109, -110, -111, + -112, -112, -113, -114, -115, -115, -116, -116, + -117, -117, -118, -118, -119, -119, -119, -120, + -120, -120, -120, -120, -121, -121, -121, -121, + -121, -121, -120, -120, -120, -120, -120, -119, + -119, -119, -118, -118, -117, -117, -116, -116, + -115, -114, -114, -113, -112, -111, 
-111, -110, + -109, -108, -107, -106, -105, -104, -103, -102, -100 +}; + +static const int hsv_blue_x[] = { + 112, 113, 114, 114, 115, 116, 117, 117, + 118, 118, 119, 119, 120, 120, 120, 121, + 121, 121, 122, 122, 122, 122, 122, 122, + 122, 122, 122, 122, 122, 122, 121, 121, + 121, 120, 120, 120, 119, 119, 118, 118, + 117, 116, 116, 115, 114, 113, 113, 112, + 111, 110, 109, 108, 107, 106, 105, 104, + 103, 102, 100, 99, 98, 97, 95, 94, + 93, 91, 90, 88, 87, 85, 84, 82, + 80, 79, 77, 76, 74, 72, 70, 69, + 67, 65, 63, 61, 60, 58, 56, 54, + 52, 50, 48, 46, 44, 42, 40, 38, + 36, 34, 32, 30, 28, 26, 24, 22, + 19, 17, 15, 13, 11, 9, 7, 5, + 2, 0, -1, -3, -5, -7, -9, -12, + -14, -16, -18, -20, -22, -24, -26, -28, + -31, -33, -35, -37, -39, -41, -43, -45, + -47, -49, -51, -53, -54, -56, -58, -60, + -62, -64, -66, -67, -69, -71, -73, -74, + -76, -78, -79, -81, -83, -84, -86, -87, + -89, -90, -92, -93, -94, -96, -97, -98, + -99, -101, -102, -103, -104, -105, -106, -107, + -108, -109, -110, -111, -112, -113, -114, -114, + -115, -116, -117, -117, -118, -118, -119, -119, + -120, -120, -120, -121, -121, -121, -122, -122, + -122, -122, -122, -122, -122, -122, -122, -122, + -122, -122, -121, -121, -121, -120, -120, -120, + -119, -119, -118, -118, -117, -116, -116, -115, + -114, -113, -113, -112, -111, -110, -109, -108, + -107, -106, -105, -104, -103, -102, -100, -99, + -98, -97, -95, -94, -93, -91, -90, -88, + -87, -85, -84, -82, -80, -79, -77, -76, + -74, -72, -70, -69, -67, -65, -63, -61, + -60, -58, -56, -54, -52, -50, -48, -46, + -44, -42, -40, -38, -36, -34, -32, -30, + -28, -26, -24, -22, -19, -17, -15, -13, + -11, -9, -7, -5, -2, 0, 1, 3, + 5, 7, 9, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 31, 33, 35, 37, + 39, 41, 43, 45, 47, 49, 51, 53, + 54, 56, 58, 60, 62, 64, 66, 67, + 69, 71, 73, 74, 76, 78, 79, 81, + 83, 84, 86, 87, 89, 90, 92, 93, + 94, 96, 97, 98, 99, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112 +}; + +static const int hsv_blue_y[] = { + -11, -13, -15, -17, -19, -21, -23, -25, + -27, -29, -31, -33, -35, -37, -39, -41, + -43, -45, -46, -48, -50, -52, -54, -55, + -57, -59, -61, -62, -64, -66, -67, -69, + -71, -72, -74, -75, -77, -78, -80, -81, + -83, -84, -86, -87, -88, -90, -91, -92, + -93, -95, -96, -97, -98, -99, -100, -101, + -102, -103, -104, -105, -106, -106, -107, -108, + -109, -109, -110, -111, -111, -112, -112, -113, + -113, -114, -114, -114, -115, -115, -115, -115, + -116, -116, -116, -116, -116, -116, -116, -116, + -116, -115, -115, -115, -115, -114, -114, -114, + -113, -113, -112, -112, -111, -111, -110, -110, + -109, -108, -108, -107, -106, -105, -104, -103, + -102, -101, -100, -99, -98, -97, -96, -95, + -94, -93, -91, -90, -89, -88, -86, -85, + -84, -82, -81, -79, -78, -76, -75, -73, + -71, -70, -68, -67, -65, -63, -62, -60, + -58, -56, -55, -53, -51, -49, -47, -45, + -44, -42, -40, -38, -36, -34, -32, -30, + -28, -26, -24, -22, -20, -18, -16, -14, + -12, -10, -8, -6, -4, -2, 0, 1, + 3, 5, 7, 9, 11, 13, 15, 17, + 19, 21, 23, 25, 27, 29, 31, 33, + 35, 37, 39, 41, 43, 45, 46, 48, + 50, 52, 54, 55, 57, 59, 61, 62, + 64, 66, 67, 69, 71, 72, 74, 75, + 77, 78, 80, 81, 83, 84, 86, 87, + 88, 90, 91, 92, 93, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, + 106, 106, 107, 108, 109, 109, 110, 111, + 111, 112, 112, 113, 113, 114, 114, 114, + 115, 115, 115, 115, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 115, 115, 115, + 115, 114, 114, 114, 113, 113, 112, 112, + 111, 111, 110, 110, 109, 108, 108, 107, + 106, 105, 104, 103, 102, 101, 100, 99, + 98, 97, 96, 95, 94, 93, 
91, 90, + 89, 88, 86, 85, 84, 82, 81, 79, + 78, 76, 75, 73, 71, 70, 68, 67, + 65, 63, 62, 60, 58, 56, 55, 53, + 51, 49, 47, 45, 44, 42, 40, 38, + 36, 34, 32, 30, 28, 26, 24, 22, + 20, 18, 16, 14, 12, 10, 8, 6, + 4, 2, 0, -1, -3, -5, -7, -9, -11 +}; + +static u16 i2c_ident[] = { + V4L2_IDENT_OV9650, + V4L2_IDENT_OV9655, + V4L2_IDENT_SOI968, + V4L2_IDENT_OV7660, + V4L2_IDENT_OV7670, + V4L2_IDENT_MT9V011, + V4L2_IDENT_MT9V111, + V4L2_IDENT_MT9V112, + V4L2_IDENT_MT9M001C12ST, + V4L2_IDENT_MT9M111, + V4L2_IDENT_HV7131R, +}; + +static u16 bridge_init[][2] = { + {0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c}, + {0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40}, + {0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10}, + {0x106b, 0x08}, {0x1188, 0x87}, {0x11a1, 0x00}, + {0x11a2, 0x00}, {0x11a3, 0x6a}, {0x11a4, 0x50}, + {0x11ab, 0x00}, {0x11ac, 0x00}, {0x11ad, 0x50}, + {0x11ae, 0x3c}, {0x118a, 0x04}, {0x0395, 0x04}, + {0x11b8, 0x3a}, {0x118b, 0x0e}, {0x10f7, 0x05}, + {0x10f8, 0x14}, {0x10fa, 0xff}, {0x10f9, 0x00}, + {0x11ba, 0x0a}, {0x11a5, 0x2d}, {0x11a6, 0x2d}, + {0x11a7, 0x3a}, {0x11a8, 0x05}, {0x11a9, 0x04}, + {0x11aa, 0x3f}, {0x11af, 0x28}, {0x11b0, 0xd8}, + {0x11b1, 0x14}, {0x11b2, 0xec}, {0x11b3, 0x32}, + {0x11b4, 0xdd}, {0x11b5, 0x32}, {0x11b6, 0xdd}, + {0x10e0, 0x2c}, {0x11bc, 0x40}, {0x11bd, 0x01}, + {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f}, + {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f}, + {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01}, + {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80} +}; + +/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */ +static u8 ov_gain[] = { + 0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */, + 0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */, + 0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */, + 0x30 /* 4x */, 0x31 /* 4.25x */, 0x32 /* 4.5x */, 0x33 /* 4.75x */, + 0x34 /* 5x */, 0x35 /* 5.25x */, 0x36 /* 5.5x */, 0x37 /* 5.75x */, + 0x38 /* 6x */, 0x39 /* 6.25x */, 0x3a /* 6.5x */, 0x3b /* 6.75x */, + 0x3c /* 7x */, 0x3d /* 7.25x */, 0x3e /* 7.5x */, 0x3f /* 7.75x */, + 0x70 /* 8x */ +}; + +/* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */ +static u16 micron1_gain[] = { + /* 1x 1.25x 1.5x 1.75x */ + 0x0020, 0x0028, 0x0030, 0x0038, + /* 2x 2.25x 2.5x 2.75x */ + 0x00a0, 0x00a4, 0x00a8, 0x00ac, + /* 3x 3.25x 3.5x 3.75x */ + 0x00b0, 0x00b4, 0x00b8, 0x00bc, + /* 4x 4.25x 4.5x 4.75x */ + 0x00c0, 0x00c4, 0x00c8, 0x00cc, + /* 5x 5.25x 5.5x 5.75x */ + 0x00d0, 0x00d4, 0x00d8, 0x00dc, + /* 6x 6.25x 6.5x 6.75x */ + 0x00e0, 0x00e4, 0x00e8, 0x00ec, + /* 7x 7.25x 7.5x 7.75x */ + 0x00f0, 0x00f4, 0x00f8, 0x00fc, + /* 8x */ + 0x01c0 +}; + +/* mt9m001 sensor uses a different gain formula then other micron sensors */ +/* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */ +static u16 micron2_gain[] = { + /* 1x 1.25x 1.5x 1.75x */ + 0x0008, 0x000a, 0x000c, 0x000e, + /* 2x 2.25x 2.5x 2.75x */ + 0x0010, 0x0012, 0x0014, 0x0016, + /* 3x 3.25x 3.5x 3.75x */ + 0x0018, 0x001a, 0x001c, 0x001e, + /* 4x 4.25x 4.5x 4.75x */ + 0x0020, 0x0051, 0x0052, 0x0053, + /* 5x 5.25x 5.5x 5.75x */ + 0x0054, 0x0055, 0x0056, 0x0057, + /* 6x 6.25x 6.5x 6.75x */ + 0x0058, 0x0059, 0x005a, 0x005b, + /* 7x 7.25x 7.5x 7.75x */ + 0x005c, 0x005d, 0x005e, 0x005f, + /* 8x */ + 0x0060 +}; + +/* Gain = .5 + bit[7:0] / 16 */ +static u8 hv7131r_gain[] = { + 0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */, + 0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */, + 0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x 
*/, 0x34 /* 3.75x */, + 0x38 /* 4x */, 0x3c /* 4.25x */, 0x40 /* 4.5x */, 0x44 /* 4.75x */, + 0x48 /* 5x */, 0x4c /* 5.25x */, 0x50 /* 5.5x */, 0x54 /* 5.75x */, + 0x58 /* 6x */, 0x5c /* 6.25x */, 0x60 /* 6.5x */, 0x64 /* 6.75x */, + 0x68 /* 7x */, 0x6c /* 7.25x */, 0x70 /* 7.5x */, 0x74 /* 7.75x */, + 0x78 /* 8x */ +}; + +static u8 soi968_init[][2] = { + {0x12, 0x80}, {0x0c, 0x00}, {0x0f, 0x1f}, + {0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00}, + {0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c}, + {0x37, 0x04}, {0x45, 0x04}, {0x47, 0xff}, + {0x3e, 0x00}, {0x3f, 0x00}, {0x3b, 0x20}, + {0x3a, 0x96}, {0x3d, 0x0a}, {0x14, 0x8e}, + {0x13, 0x8a}, {0x12, 0x40}, {0x17, 0x13}, + {0x18, 0x63}, {0x19, 0x01}, {0x1a, 0x79}, + {0x32, 0x24}, {0x03, 0x00}, {0x11, 0x40}, + {0x2a, 0x10}, {0x2b, 0xe0}, {0x10, 0x32}, + {0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, +}; + +static u8 ov7660_init[][2] = { + {0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3}, + {0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40}, + {0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a}, + {0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43}, + {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6}, + {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50}, +}; + +static u8 ov7670_init[][2] = { + {0x12, 0x80}, {0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01}, + {0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00}, + {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0}, + {0xa2, 0x02}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00}, + {0x0d, 0x40}, {0x14, 0x28}, {0xa5, 0x05}, {0xab, 0x07}, + {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x75}, + {0xa0, 0x65}, {0xa1, 0x0b}, {0xa6, 0xd8}, {0xa7, 0xd8}, + {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5}, + {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x1e, 0x27}, + {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b}, + {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a}, + {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00}, + {0x74, 0x19}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00}, + {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80}, + {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82}, + {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x20}, + {0x46, 0x7d}, {0x47, 0x29}, {0x48, 0x4a}, {0x59, 0x8c}, + {0x5a, 0xa5}, {0x5b, 0xde}, {0x5c, 0x96}, {0x5d, 0x66}, + {0x5e, 0x10}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11}, + {0x6f, 0x9e}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40}, + {0x13, 0xe7}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x02}, + {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x55, 0x0a}, + {0x56, 0x55}, {0x57, 0x80}, {0x58, 0x9e}, {0x41, 0x08}, + {0x3f, 0x02}, {0x75, 0x03}, {0x76, 0x63}, {0x4c, 0x04}, + {0x77, 0x06}, {0x3d, 0x02}, {0x4b, 0x09}, {0xc9, 0x30}, + {0x41, 0x08}, {0x56, 0x48}, {0x34, 0x11}, {0xa4, 0x88}, + {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x30}, + {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x99}, + {0x9e, 0x7f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0}, + {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e}, + {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01}, + {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20}, + {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0}, + {0x79, 0x03}, {0xc8, 0x40}, {0x79, 0x05}, {0xc8, 0x30}, + {0x79, 0x26}, {0x62, 0x20}, {0x63, 0x00}, {0x64, 0x06}, + {0x65, 0x00}, {0x66, 0x05}, {0x94, 0x05}, {0x95, 0x0a}, + {0x17, 0x13}, {0x18, 0x01}, {0x19, 0x02}, {0x1a, 0x7a}, + {0x46, 0x59}, {0x47, 0x30}, {0x58, 0x9a}, {0x59, 0x84}, + {0x5a, 0x91}, {0x5b, 0x57}, {0x5c, 0x75}, {0x5d, 0x6d}, + {0x5e, 0x13}, {0x64, 0x07}, {0x94, 0x07}, {0x95, 0x0d}, + {0xa6, 0xdf}, {0xa7, 0xdf}, {0x48, 0x4d}, {0x51, 
0x00}, + {0x6b, 0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, + {0x92, 0x00}, {0x93, 0x00}, {0x55, 0x0a}, {0x56, 0x60}, + {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, + {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x4f, 0x6e}, + {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56}, + {0x54, 0x73}, {0x58, 0x9a}, {0x3f, 0x01}, {0x7b, 0x03}, + {0x7c, 0x09}, {0x7d, 0x16}, {0x7e, 0x38}, {0x7f, 0x47}, + {0x80, 0x53}, {0x81, 0x5e}, {0x82, 0x6a}, {0x83, 0x74}, + {0x84, 0x80}, {0x85, 0x8c}, {0x86, 0x9b}, {0x87, 0xb2}, + {0x88, 0xcc}, {0x89, 0xe5}, {0x7a, 0x24}, {0x3b, 0x00}, + {0x9f, 0x76}, {0xa0, 0x65}, {0x13, 0xe2}, {0x6b, 0x0a}, + {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00}, + {0x93, 0x00}, +}; + +static u8 ov9650_init[][2] = { + {0x12, 0x80}, {0x00, 0x00}, {0x01, 0x78}, + {0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03}, + {0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00}, + {0x09, 0x01}, {0x0c, 0x00}, {0x0d, 0x00}, + {0x0e, 0xa0}, {0x0f, 0x52}, {0x10, 0x7c}, + {0x11, 0x80}, {0x12, 0x45}, {0x13, 0xc2}, + {0x14, 0x2e}, {0x15, 0x00}, {0x16, 0x07}, + {0x17, 0x24}, {0x18, 0xc5}, {0x19, 0x00}, + {0x1a, 0x3c}, {0x1b, 0x00}, {0x1e, 0x04}, + {0x1f, 0x00}, {0x24, 0x78}, {0x25, 0x68}, + {0x26, 0xd4}, {0x27, 0x80}, {0x28, 0x80}, + {0x29, 0x30}, {0x2a, 0x00}, {0x2b, 0x00}, + {0x2c, 0x80}, {0x2d, 0x00}, {0x2e, 0x00}, + {0x2f, 0x00}, {0x30, 0x08}, {0x31, 0x30}, + {0x32, 0x84}, {0x33, 0xe2}, {0x34, 0xbf}, + {0x35, 0x81}, {0x36, 0xf9}, {0x37, 0x00}, + {0x38, 0x93}, {0x39, 0x50}, {0x3a, 0x01}, + {0x3b, 0x01}, {0x3c, 0x73}, {0x3d, 0x19}, + {0x3e, 0x0b}, {0x3f, 0x80}, {0x40, 0xc1}, + {0x41, 0x00}, {0x42, 0x08}, {0x67, 0x80}, + {0x68, 0x80}, {0x69, 0x40}, {0x6a, 0x00}, + {0x6b, 0x0a}, {0x8b, 0x06}, {0x8c, 0x20}, + {0x8d, 0x00}, {0x8e, 0x00}, {0x8f, 0xdf}, + {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x88}, + {0x95, 0x88}, {0x96, 0x04}, {0xa1, 0x00}, + {0xa5, 0x80}, {0xa8, 0x80}, {0xa9, 0xb8}, + {0xaa, 0x92}, {0xab, 0x0a}, +}; + +static u8 ov9655_init[][2] = { + {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61}, + {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24}, + {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08}, + {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf}, + {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12}, + {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, + {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, + {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, + {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, + {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04}, + {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02}, + {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c}, + {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60}, + {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04}, + {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, + {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf}, + {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b}, + {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01}, + {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0}, + {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61}, + {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a}, + {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01}, + {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10}, + {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04}, + {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00}, + {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80}, + {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d}, + {0xc0, 0xaa}, 
{0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14}, + {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf}, + {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00}, + {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00}, + {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03}, + {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03}, + {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13}, +}; + +static u16 mt9v112_init[][2] = { + {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020}, + {0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b}, + {0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001}, + {0x05, 0x0000}, {0x06, 0x340c}, {0x3b, 0x042a}, + {0x3c, 0x0400}, {0xf0, 0x0002}, {0x2e, 0x0c58}, + {0x5b, 0x0001}, {0xc8, 0x9f0b}, {0xf0, 0x0001}, + {0x9b, 0x5300}, {0xf0, 0x0000}, {0x2b, 0x0020}, + {0x2c, 0x002a}, {0x2d, 0x0032}, {0x2e, 0x0020}, + {0x09, 0x01dc}, {0x01, 0x000c}, {0x02, 0x0020}, + {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, + {0x05, 0x0098}, {0x20, 0x0703}, {0x09, 0x01f2}, + {0x2b, 0x00a0}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, + {0x2e, 0x00a0}, {0x01, 0x000c}, {0x02, 0x0020}, + {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, + {0x05, 0x0098}, {0x09, 0x01c1}, {0x2b, 0x00ae}, + {0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae}, +}; + +static u16 mt9v111_init[][2] = { + {0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000}, + {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1}, + {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002}, + {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03}, + {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c}, + {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064}, + {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480}, + {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6}, + {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000}, + {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000}, + {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000}, + {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0}, + {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000}, + {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000}, + {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000}, + {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000}, + {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016}, + {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004}, + {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d}, + {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0}, + {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0}, + {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281}, + {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002}, + {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004}, +}; + +static u16 mt9v011_init[][2] = { + {0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000}, + {0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1}, + {0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006}, + {0x0d, 0x0002}, {0x0a, 0x0000}, {0x0b, 0x0000}, + {0x0c, 0x0000}, {0x0d, 0x0000}, {0x0e, 0x0000}, + {0x0f, 0x0000}, {0x10, 0x0000}, {0x11, 0x0000}, + {0x12, 0x0000}, {0x13, 0x0000}, {0x14, 0x0000}, + {0x15, 0x0000}, {0x16, 0x0000}, {0x17, 0x0000}, + {0x18, 0x0000}, {0x19, 0x0000}, {0x1a, 0x0000}, + {0x1b, 0x0000}, {0x1c, 0x0000}, {0x1d, 0x0000}, + {0x32, 0x0000}, {0x20, 0x1101}, {0x21, 0x0000}, + {0x22, 0x0000}, {0x23, 0x0000}, {0x24, 0x0000}, + {0x25, 0x0000}, {0x26, 0x0000}, {0x27, 0x0024}, + {0x2f, 0xf7b0}, {0x30, 0x0005}, {0x31, 0x0000}, + {0x32, 0x0000}, {0x33, 0x0000}, {0x34, 0x0100}, + {0x3d, 0x068f}, {0x40, 0x01e0}, {0x41, 0x00d1}, + {0x44, 0x0082}, {0x5a, 0x0000}, {0x5b, 0x0000}, + {0x5c, 0x0000}, {0x5d, 0x0000}, {0x5e, 0x0000}, + {0x5f, 0xa31d}, {0x62, 0x0611}, {0x0a, 0x0000}, + {0x06, 0x0029}, {0x05, 0x0009}, {0x20, 0x1101}, + {0x20, 0x1101}, {0x09, 0x0064}, {0x07, 
0x0003}, + {0x2b, 0x0033}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, + {0x2e, 0x0033}, {0x07, 0x0002}, {0x06, 0x0000}, + {0x06, 0x0029}, {0x05, 0x0009}, +}; + +static u16 mt9m001_init[][2] = { + {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x000e}, + {0x02, 0x0014}, {0x03, 0x03c1}, {0x04, 0x0501}, + {0x05, 0x0083}, {0x06, 0x0006}, {0x0d, 0x0002}, + {0x0a, 0x0000}, {0x0c, 0x0000}, {0x11, 0x0000}, + {0x1e, 0x8000}, {0x5f, 0x8904}, {0x60, 0x0000}, + {0x61, 0x0000}, {0x62, 0x0498}, {0x63, 0x0000}, + {0x64, 0x0000}, {0x20, 0x111d}, {0x06, 0x00f2}, + {0x05, 0x0013}, {0x09, 0x10f2}, {0x07, 0x0003}, + {0x2b, 0x002a}, {0x2d, 0x002a}, {0x2c, 0x002a}, + {0x2e, 0x0029}, {0x07, 0x0002}, +}; + +static u16 mt9m111_init[][2] = { + {0xf0, 0x0000}, {0x0d, 0x0008}, {0x0d, 0x0009}, + {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300}, + {0x9b, 0x4300}, {0xa1, 0x0280}, {0xa4, 0x0200}, + {0x06, 0x308e}, {0xf0, 0x0000}, +}; + +static u8 hv7131r_init[][2] = { + {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08}, + {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0}, + {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08}, + {0x01, 0x08}, {0x01, 0x08}, {0x25, 0x07}, + {0x26, 0xc3}, {0x27, 0x50}, {0x30, 0x62}, + {0x31, 0x10}, {0x32, 0x06}, {0x33, 0x10}, + {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00}, + {0x23, 0x09}, {0x01, 0x08}, +}; + +int reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) +{ + struct usb_device *dev = gspca_dev->dev; + int result; + result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + 0x00, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + reg, + 0x00, + gspca_dev->usb_buf, + length, + 500); + if (unlikely(result < 0 || result != length)) { + err("Read register failed 0x%02X", reg); + return -EIO; + } + return 0; +} + +int reg_w(struct gspca_dev *gspca_dev, u16 reg, const u8 *buffer, int length) +{ + struct usb_device *dev = gspca_dev->dev; + int result; + memcpy(gspca_dev->usb_buf, buffer, length); + result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + 0x08, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + reg, + 0x00, + gspca_dev->usb_buf, + length, + 500); + if (unlikely(result < 0 || result != length)) { + err("Write register failed index 0x%02X", reg); + return -EIO; + } + return 0; +} + +int reg_w1(struct gspca_dev *gspca_dev, u16 reg, const u8 value) +{ + u8 data[1] = {value}; + return reg_w(gspca_dev, reg, data, 1); +} + +int i2c_w(struct gspca_dev *gspca_dev, const u8 *buffer) +{ + int i; + reg_w(gspca_dev, 0x10c0, buffer, 8); + for (i = 0; i < 5; i++) { + reg_r(gspca_dev, 0x10c0, 1); + if (gspca_dev->usb_buf[0] & 0x04) { + if (gspca_dev->usb_buf[0] & 0x08) + return -1; + return 0; + } + msleep(1); + } + return -1; +} + +int i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + u8 row[8]; + + /* + * from the point of view of the bridge, the length + * includes the address + */ + row[0] = 0x81 | (2 << 4); + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = val; + row[4] = 0x00; + row[5] = 0x00; + row[6] = 0x00; + row[7] = 0x10; + + return i2c_w(gspca_dev, row); +} + +int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 row[8]; + + /* + * from the point of view of the bridge, the length + * includes the address + */ + row[0] = 0x81 | (3 << 4); + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = (val >> 8) & 0xff; + row[4] = val & 0xff; + row[5] = 0x00; + row[6] = 0x00; + row[7] = 0x10; + + return i2c_w(gspca_dev, row); +} + +int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val) +{ + struct sd *sd = (struct sd *) 
gspca_dev; + u8 row[8]; + + row[0] = 0x81 | 0x10; + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = 0; + row[4] = 0; + row[5] = 0; + row[6] = 0; + row[7] = 0x10; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + row[0] = 0x81 | (2 << 4) | 0x02; + row[2] = 0; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + reg_r(gspca_dev, 0x10c2, 5); + *val = gspca_dev->usb_buf[3]; + return 0; +} + +int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 row[8]; + + row[0] = 0x81 | 0x10; + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = 0; + row[4] = 0; + row[5] = 0; + row[6] = 0; + row[7] = 0x10; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + row[0] = 0x81 | (3 << 4) | 0x02; + row[2] = 0; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + reg_r(gspca_dev, 0x10c2, 5); + *val = (gspca_dev->usb_buf[2] << 8) | gspca_dev->usb_buf[3]; + return 0; +} + +static int ov9650_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov9650_init); i++) { + if (i2c_w1(gspca_dev, ov9650_init[i][0], + ov9650_init[i][1]) < 0) { + err("OV9650 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 1; + sd->vstart = 7; + return 0; +} + +static int ov9655_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov9655_init); i++) { + if (i2c_w1(gspca_dev, ov9655_init[i][0], + ov9655_init[i][1]) < 0) { + err("OV9655 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 0; + sd->vstart = 7; + return 0; +} + +static int soi968_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(soi968_init); i++) { + if (i2c_w1(gspca_dev, soi968_init[i][0], + soi968_init[i][1]) < 0) { + err("SOI968 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 60; + sd->vstart = 11; + return 0; +} + +static int ov7660_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov7660_init); i++) { + if (i2c_w1(gspca_dev, ov7660_init[i][0], + ov7660_init[i][1]) < 0) { + err("OV7660 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 1; + sd->vstart = 1; + return 0; +} + +static int ov7670_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov7670_init); i++) { + if (i2c_w1(gspca_dev, ov7670_init[i][0], + ov7670_init[i][1]) < 0) { + err("OV7670 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 0; + sd->vstart = 1; + return 0; +} + +static int mt9v_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + u16 value; + int ret; + + sd->i2c_addr = 0x5d; + ret = i2c_r2(gspca_dev, 0xff, &value); + if ((ret == 0) && (value == 0x8243)) { + for (i = 0; i < ARRAY_SIZE(mt9v011_init); i++) { + if (i2c_w2(gspca_dev, mt9v011_init[i][0], + mt9v011_init[i][1]) < 0) { + err("MT9V011 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 2; 
+ sd->vstart = 2; + sd->sensor = SENSOR_MT9V011; + info("MT9V011 sensor detected"); + return 0; + } + + sd->i2c_addr = 0x5c; + i2c_w2(gspca_dev, 0x01, 0x0004); + ret = i2c_r2(gspca_dev, 0xff, &value); + if ((ret == 0) && (value == 0x823a)) { + for (i = 0; i < ARRAY_SIZE(mt9v111_init); i++) { + if (i2c_w2(gspca_dev, mt9v111_init[i][0], + mt9v111_init[i][1]) < 0) { + err("MT9V111 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 2; + sd->vstart = 2; + sd->sensor = SENSOR_MT9V111; + info("MT9V111 sensor detected"); + return 0; + } + + sd->i2c_addr = 0x5d; + ret = i2c_w2(gspca_dev, 0xf0, 0x0000); + if (ret < 0) { + sd->i2c_addr = 0x48; + i2c_w2(gspca_dev, 0xf0, 0x0000); + } + ret = i2c_r2(gspca_dev, 0x00, &value); + if ((ret == 0) && (value == 0x1229)) { + for (i = 0; i < ARRAY_SIZE(mt9v112_init); i++) { + if (i2c_w2(gspca_dev, mt9v112_init[i][0], + mt9v112_init[i][1]) < 0) { + err("MT9V112 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 6; + sd->vstart = 2; + sd->sensor = SENSOR_MT9V112; + info("MT9V112 sensor detected"); + return 0; + } + + return -ENODEV; +} + +static int mt9m111_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + for (i = 0; i < ARRAY_SIZE(mt9m111_init); i++) { + if (i2c_w2(gspca_dev, mt9m111_init[i][0], + mt9m111_init[i][1]) < 0) { + err("MT9M111 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 0; + sd->vstart = 2; + return 0; +} + +static int mt9m001_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + for (i = 0; i < ARRAY_SIZE(mt9m001_init); i++) { + if (i2c_w2(gspca_dev, mt9m001_init[i][0], + mt9m001_init[i][1]) < 0) { + err("MT9M001 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 2; + sd->vstart = 2; + return 0; +} + +static int hv7131r_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(hv7131r_init); i++) { + if (i2c_w1(gspca_dev, hv7131r_init[i][0], + hv7131r_init[i][1]) < 0) { + err("HV7131R Sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 0; + sd->vstart = 1; + return 0; +} + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +static int input_kthread(void *data) +{ + struct gspca_dev *gspca_dev = (struct gspca_dev *)data; + struct sd *sd = (struct sd *) gspca_dev; + + DECLARE_WAIT_QUEUE_HEAD(wait); + set_freezable(); + for (;;) { + if (kthread_should_stop()) + break; + + if (reg_r(gspca_dev, 0x1005, 1) < 0) + continue; + + input_report_key(sd->input_dev, + KEY_CAMERA, + gspca_dev->usb_buf[0] & sd->input_gpio); + input_sync(sd->input_dev); + + wait_event_freezable_timeout(wait, + kthread_should_stop(), + msecs_to_jiffies(100)); + } + return 0; +} + + +static int sn9c20x_input_init(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + if (sd->input_gpio == 0) + return 0; + + sd->input_dev = input_allocate_device(); + if (!sd->input_dev) + return -ENOMEM; + + sd->input_dev->name = "SN9C20X Webcam"; + + sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s", + gspca_dev->dev->bus->bus_name, + gspca_dev->dev->devpath); + + if (!sd->input_dev->phys) + return -ENOMEM; + + usb_to_input_id(gspca_dev->dev, &sd->input_dev->id); + sd->input_dev->dev.parent = &gspca_dev->dev->dev; + + set_bit(EV_KEY, sd->input_dev->evbit); + set_bit(KEY_CAMERA, sd->input_dev->keybit); + + if 
(input_register_device(sd->input_dev)) + return -EINVAL; + + sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d", + gspca_dev->vdev.minor); + + if (IS_ERR(sd->input_task)) + return -EINVAL; + + return 0; +} + +static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + if (sd->input_task != NULL && !IS_ERR(sd->input_task)) + kthread_stop(sd->input_task); + + if (sd->input_dev != NULL) { + input_unregister_device(sd->input_dev); + kfree(sd->input_dev->phys); + input_free_device(sd->input_dev); + sd->input_dev = NULL; + } +} +#endif + +static int set_cmatrix(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + s32 hue_coord, hue_index = 180 + sd->hue; + u8 cmatrix[21]; + memset(cmatrix, 0, 21); + + cmatrix[2] = (sd->contrast * 0x25 / 0x100) + 0x26; + cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25; + cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25; + cmatrix[18] = sd->brightness - 0x80; + + hue_coord = (hsv_red_x[hue_index] * sd->saturation) >> 8; + cmatrix[6] = (unsigned char)(hue_coord & 0xff); + cmatrix[7] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_red_y[hue_index] * sd->saturation) >> 8; + cmatrix[8] = (unsigned char)(hue_coord & 0xff); + cmatrix[9] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_green_x[hue_index] * sd->saturation) >> 8; + cmatrix[10] = (unsigned char)(hue_coord & 0xff); + cmatrix[11] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_green_y[hue_index] * sd->saturation) >> 8; + cmatrix[12] = (unsigned char)(hue_coord & 0xff); + cmatrix[13] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_blue_x[hue_index] * sd->saturation) >> 8; + cmatrix[14] = (unsigned char)(hue_coord & 0xff); + cmatrix[15] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_blue_y[hue_index] * sd->saturation) >> 8; + cmatrix[16] = (unsigned char)(hue_coord & 0xff); + cmatrix[17] = (unsigned char)((hue_coord >> 8) & 0x0f); + + return reg_w(gspca_dev, 0x10e1, cmatrix, 21); +} + +static int set_gamma(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 gamma[17]; + u8 gval = sd->gamma * 0xb8 / 0x100; + + + gamma[0] = 0x0a; + gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8); + gamma[2] = 0x25 + (gval * (0xee - 0x25) / 0xb8); + gamma[3] = 0x37 + (gval * (0xfa - 0x37) / 0xb8); + gamma[4] = 0x45 + (gval * (0xfc - 0x45) / 0xb8); + gamma[5] = 0x55 + (gval * (0xfb - 0x55) / 0xb8); + gamma[6] = 0x65 + (gval * (0xfc - 0x65) / 0xb8); + gamma[7] = 0x74 + (gval * (0xfd - 0x74) / 0xb8); + gamma[8] = 0x83 + (gval * (0xfe - 0x83) / 0xb8); + gamma[9] = 0x92 + (gval * (0xfc - 0x92) / 0xb8); + gamma[10] = 0xa1 + (gval * (0xfc - 0xa1) / 0xb8); + gamma[11] = 0xb0 + (gval * (0xfc - 0xb0) / 0xb8); + gamma[12] = 0xbf + (gval * (0xfb - 0xbf) / 0xb8); + gamma[13] = 0xce + (gval * (0xfb - 0xce) / 0xb8); + gamma[14] = 0xdf + (gval * (0xfd - 0xdf) / 0xb8); + gamma[15] = 0xea + (gval * (0xf9 - 0xea) / 0xb8); + gamma[16] = 0xf5; + + return reg_w(gspca_dev, 0x1190, gamma, 17); +} + +static int set_redblue(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + reg_w1(gspca_dev, 0x118c, sd->red); + reg_w1(gspca_dev, 0x118f, sd->blue); + return 0; +} + +static int set_hvflip(struct gspca_dev *gspca_dev) +{ + u8 value, tslb; + u16 value2; + struct sd *sd = (struct sd *) gspca_dev; + switch (sd->sensor) { + case SENSOR_OV9650: + i2c_r1(gspca_dev, 0x1e, &value); + value &= ~0x30; + tslb = 0x01; + if 
(sd->hflip) + value |= 0x20; + if (sd->vflip) { + value |= 0x10; + tslb = 0x49; + } + i2c_w1(gspca_dev, 0x1e, value); + i2c_w1(gspca_dev, 0x3a, tslb); + break; + case SENSOR_MT9V111: + case SENSOR_MT9V011: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0xc0a0; + if (sd->hflip) + value2 |= 0x8080; + if (sd->vflip) + value2 |= 0x4020; + i2c_w2(gspca_dev, 0x20, value2); + break; + case SENSOR_MT9M111: + case SENSOR_MT9V112: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0x0003; + if (sd->hflip) + value2 |= 0x0002; + if (sd->vflip) + value2 |= 0x0001; + i2c_w2(gspca_dev, 0x20, value2); + break; + case SENSOR_HV7131R: + i2c_r1(gspca_dev, 0x01, &value); + value &= ~0x03; + if (sd->vflip) + value |= 0x01; + if (sd->hflip) + value |= 0x02; + i2c_w1(gspca_dev, 0x01, value); + break; + } + return 0; +} + +static int set_exposure(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e}; + switch (sd->sensor) { + case SENSOR_OV7660: + case SENSOR_OV7670: + case SENSOR_SOI968: + case SENSOR_OV9655: + case SENSOR_OV9650: + exp[0] |= (3 << 4); + exp[2] = 0x2d; + exp[3] = sd->exposure & 0xff; + exp[4] = sd->exposure >> 8; + break; + case SENSOR_MT9M001: + case SENSOR_MT9M111: + case SENSOR_MT9V112: + case SENSOR_MT9V111: + case SENSOR_MT9V011: + exp[0] |= (3 << 4); + exp[2] = 0x09; + exp[3] = sd->exposure >> 8; + exp[4] = sd->exposure & 0xff; + break; + case SENSOR_HV7131R: + exp[0] |= (4 << 4); + exp[2] = 0x25; + exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16; + exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8; + exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff; + break; + } + i2c_w(gspca_dev, exp); + return 0; +} + +static int set_gain(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d}; + switch (sd->sensor) { + case SENSOR_OV7660: + case SENSOR_OV7670: + case SENSOR_SOI968: + case SENSOR_OV9655: + case SENSOR_OV9650: + gain[0] |= (2 << 4); + gain[3] = ov_gain[sd->gain]; + break; + case SENSOR_MT9V011: + case SENSOR_MT9V111: + gain[0] |= (3 << 4); + gain[2] = 0x35; + gain[3] = micron1_gain[sd->gain] >> 8; + gain[4] = micron1_gain[sd->gain] & 0xff; + break; + case SENSOR_MT9V112: + case SENSOR_MT9M111: + gain[0] |= (3 << 4); + gain[2] = 0x2f; + gain[3] = micron1_gain[sd->gain] >> 8; + gain[4] = micron1_gain[sd->gain] & 0xff; + break; + case SENSOR_MT9M001: + gain[0] |= (3 << 4); + gain[2] = 0x2f; + gain[3] = micron2_gain[sd->gain] >> 8; + gain[4] = micron2_gain[sd->gain] & 0xff; + break; + case SENSOR_HV7131R: + gain[0] |= (2 << 4); + gain[2] = 0x30; + gain[3] = hv7131r_gain[sd->gain]; + break; + } + i2c_w(gspca_dev, gain); + return 0; +} + +static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->brightness = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->brightness; + return 0; +} + + +static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->contrast = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->contrast; + return 0; +} + +static int sd_setsaturation(struct 
gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->saturation = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->saturation; + return 0; +} + +static int sd_sethue(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->hue = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->hue; + return 0; +} + +static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->gamma = val; + if (gspca_dev->streaming) + return set_gamma(gspca_dev); + return 0; +} + +static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->gamma; + return 0; +} + +static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->red = val; + if (gspca_dev->streaming) + return set_redblue(gspca_dev); + return 0; +} + +static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->red; + return 0; +} + +static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->blue = val; + if (gspca_dev->streaming) + return set_redblue(gspca_dev); + return 0; +} + +static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->blue; + return 0; +} + +static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->hflip = val; + if (gspca_dev->streaming) + return set_hvflip(gspca_dev); + return 0; +} + +static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->hflip; + return 0; +} + +static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->vflip = val; + if (gspca_dev->streaming) + return set_hvflip(gspca_dev); + return 0; +} + +static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->vflip; + return 0; +} + +static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->exposure = val; + if (gspca_dev->streaming) + return set_exposure(gspca_dev); + return 0; +} + +static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->exposure; + return 0; +} + +static int sd_setgain(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->gain = val; + if (gspca_dev->streaming) + return set_gain(gspca_dev); + return 0; +} + +static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->gain; + return 0; +} + +static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + sd->auto_exposure = val; + return 0; +} + +static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->auto_exposure; + return 0; +} + +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int 
sd_dbg_g_register(struct gspca_dev *gspca_dev, + struct v4l2_dbg_register *reg) +{ + struct sd *sd = (struct sd *) gspca_dev; + switch (reg->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (reg->match.addr != 0) + return -EINVAL; + if (reg->reg < 0x1000 || reg->reg > 0x11ff) + return -EINVAL; + if (reg_r(gspca_dev, reg->reg, 1) < 0) + return -EINVAL; + reg->val = gspca_dev->usb_buf[0]; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && + sd->sensor <= SENSOR_MT9M111) { + if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0) + return -EINVAL; + } else { + if (i2c_r1(gspca_dev, reg->reg, (u8 *)&reg->val) < 0) + return -EINVAL; + } + return 0; + } + return -EINVAL; +} + +static int sd_dbg_s_register(struct gspca_dev *gspca_dev, + struct v4l2_dbg_register *reg) +{ + struct sd *sd = (struct sd *) gspca_dev; + switch (reg->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (reg->match.addr != 0) + return -EINVAL; + if (reg->reg < 0x1000 || reg->reg > 0x11ff) + return -EINVAL; + if (reg_w1(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && + sd->sensor <= SENSOR_MT9M111) { + if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + } else { + if (i2c_w1(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + } + return 0; + } + return -EINVAL; +} +#endif + +static int sd_chip_ident(struct gspca_dev *gspca_dev, + struct v4l2_dbg_chip_ident *chip) +{ + struct sd *sd = (struct sd *) gspca_dev; + + switch (chip->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (chip->match.addr != 0) + return -EINVAL; + chip->revision = 0; + chip->ident = V4L2_IDENT_SN9C20X; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (chip->match.addr != sd->i2c_addr) + return -EINVAL; + chip->revision = 0; + chip->ident = i2c_ident[sd->sensor]; + return 0; + } + return -EINVAL; +} + +static int sd_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id) +{ + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; + + cam = &gspca_dev->cam; + + sd->sensor = (id->driver_info >> 8) & 0xff; + sd->i2c_addr = id->driver_info & 0xff; + + switch (sd->sensor) { + case SENSOR_OV9650: + cam->cam_mode = sxga_mode; + cam->nmodes = ARRAY_SIZE(sxga_mode); + break; + default: + cam->cam_mode = vga_mode; + cam->nmodes = ARRAY_SIZE(vga_mode); + } + + sd->old_step = 0; + sd->older_step = 0; + sd->exposure_step = 16; + + sd->brightness = BRIGHTNESS_DEFAULT; + sd->contrast = CONTRAST_DEFAULT; + sd->saturation = SATURATION_DEFAULT; + sd->hue = HUE_DEFAULT; + sd->gamma = GAMMA_DEFAULT; + sd->red = RED_DEFAULT; + sd->blue = BLUE_DEFAULT; + + sd->hflip = HFLIP_DEFAULT; + sd->vflip = VFLIP_DEFAULT; + sd->exposure = EXPOSURE_DEFAULT; + sd->gain = GAIN_DEFAULT; + sd->auto_exposure = AUTO_EXPOSURE_DEFAULT; + + sd->quality = 95; + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + sd->input_gpio = (id->driver_info >> 16) & 0xff; + if (sn9c20x_input_init(gspca_dev) < 0) + return -ENODEV; +#endif + return 0; +} + +static int sd_init(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + u8 value; + u8 i2c_init[9] = + {0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}; + + for (i = 0; i < ARRAY_SIZE(bridge_init); i++) { + value = bridge_init[i][1]; + if (reg_w(gspca_dev, bridge_init[i][0], &value, 1) < 0) { + err("Device initialization failed"); + return -ENODEV; + } + } + + if
(reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) { + err("Device initialization failed"); + return -ENODEV; + } + + switch (sd->sensor) { + case SENSOR_OV9650: + if (ov9650_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV9650 sensor detected"); + break; + case SENSOR_OV9655: + if (ov9655_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV9655 sensor detected"); + break; + case SENSOR_SOI968: + if (soi968_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("SOI968 sensor detected"); + break; + case SENSOR_OV7660: + if (ov7660_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV7660 sensor detected"); + break; + case SENSOR_OV7670: + if (ov7670_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV7670 sensor detected"); + break; + case SENSOR_MT9VPRB: + if (mt9v_init_sensor(gspca_dev) < 0) + return -ENODEV; + break; + case SENSOR_MT9M111: + if (mt9m111_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("MT9M111 sensor detected"); + break; + case SENSOR_MT9M001: + if (mt9m001_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("MT9M001 sensor detected"); + break; + case SENSOR_HV7131R: + if (hv7131r_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("HV7131R sensor detected"); + break; + default: + info("Unsupported Sensor"); + return -ENODEV; + } + + return 0; +} + +static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 value; + switch (sd->sensor) { + case SENSOR_OV9650: + if (mode & MODE_SXGA) { + i2c_w1(gspca_dev, 0x17, 0x1b); + i2c_w1(gspca_dev, 0x18, 0xbc); + i2c_w1(gspca_dev, 0x19, 0x01); + i2c_w1(gspca_dev, 0x1a, 0x82); + i2c_r1(gspca_dev, 0x12, &value); + i2c_w1(gspca_dev, 0x12, value & 0x07); + } else { + i2c_w1(gspca_dev, 0x17, 0x24); + i2c_w1(gspca_dev, 0x18, 0xc5); + i2c_w1(gspca_dev, 0x19, 0x00); + i2c_w1(gspca_dev, 0x1a, 0x3c); + i2c_r1(gspca_dev, 0x12, &value); + i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40); + } + break; + } +} + +#define HW_WIN(mode, hstart, vstart) \ +((const u8 []){hstart & 0xff, hstart >> 8, \ +vstart & 0xff, vstart >> 8, \ +(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \ +(mode & MODE_SXGA ? 
1024 >> 3 : 480 >> 3)}) + +#define CLR_WIN(width, height) \ +((const u8 [])\ +{0, width >> 2, 0, height >> 1,\ +((width >> 10) & 0x01) | ((height >> 8) & 0x6)}) + +static int sd_start(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; + int width = gspca_dev->width; + int height = gspca_dev->height; + u8 fmt, scale = 0; + + sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (sd->jpeg_hdr == NULL) + return -ENOMEM; + + jpeg_define(sd->jpeg_hdr, height, width, + 0x21); + jpeg_set_qual(sd->jpeg_hdr, sd->quality); + + if (mode & MODE_RAW) + fmt = 0x2d; + else if (mode & MODE_JPEG) + fmt = 0x2c; + else + fmt = 0x2f; + + switch (mode & 0x0f) { + case 3: + scale = 0xc0; + info("Set 1280x1024"); + break; + case 2: + scale = 0x80; + info("Set 640x480"); + break; + case 1: + scale = 0x90; + info("Set 320x240"); + break; + case 0: + scale = 0xa0; + info("Set 160x120"); + break; + } + + configure_sensor_output(gspca_dev, mode); + reg_w(gspca_dev, 0x1100, sd->jpeg_hdr + JPEG_QT0_OFFSET, 64); + reg_w(gspca_dev, 0x1140, sd->jpeg_hdr + JPEG_QT1_OFFSET, 64); + reg_w(gspca_dev, 0x10fb, CLR_WIN(width, height), 5); + reg_w(gspca_dev, 0x1180, HW_WIN(mode, sd->hstart, sd->vstart), 6); + reg_w1(gspca_dev, 0x1189, scale); + reg_w1(gspca_dev, 0x10e0, fmt); + + set_cmatrix(gspca_dev); + set_gamma(gspca_dev); + set_redblue(gspca_dev); + set_gain(gspca_dev); + set_exposure(gspca_dev); + set_hvflip(gspca_dev); + + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02); + return 0; +} + +static void sd_stopN(struct gspca_dev *gspca_dev) +{ + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02); +} + +static void sd_stop0(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + kfree(sd->jpeg_hdr); +} + +static void do_autoexposure(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum, new_exp; + + if (!sd->auto_exposure) + return; + + avg_lum = atomic_read(&sd->avg_lum); + + /* + * some hardcoded values are present + * like those for maximal/minimal exposure + * and exposure steps + */ + if (avg_lum < MIN_AVG_LUM) { + if (sd->exposure > 0x1770) + return; + + new_exp = sd->exposure + sd->exposure_step; + if (new_exp > 0x1770) + new_exp = 0x1770; + if (new_exp < 0x10) + new_exp = 0x10; + sd->exposure = new_exp; + set_exposure(gspca_dev); + + sd->older_step = sd->old_step; + sd->old_step = 1; + + if (sd->old_step ^ sd->older_step) + sd->exposure_step /= 2; + else + sd->exposure_step += 2; + } + if (avg_lum > MAX_AVG_LUM) { + if (sd->exposure < 0x10) + return; + new_exp = sd->exposure - sd->exposure_step; + if (new_exp > 0x1700) + new_exp = 0x1770; + if (new_exp < 0x10) + new_exp = 0x10; + sd->exposure = new_exp; + set_exposure(gspca_dev); + sd->older_step = sd->old_step; + sd->old_step = 0; + + if (sd->old_step ^ sd->older_step) + sd->exposure_step /= 2; + else + sd->exposure_step += 2; + } +} + +static void sd_pkt_scan(struct gspca_dev *gspca_dev, + struct gspca_frame *frame, /* target */ + u8 *data, /* isoc packet */ + int len) /* iso packet length */ +{ + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum; + static unsigned char frame_header[] = + {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96}; + if (len == 64 && memcmp(data, frame_header, 6) == 0) { + avg_lum = ((data[35] >> 2) & 3) | + (data[20] << 2) | + (data[19] << 10); + avg_lum += ((data[35] >> 4) & 3) | + (data[22] << 2) | + (data[21] << 10); + 
avg_lum += ((data[35] >> 6) & 3) | + (data[24] << 2) | + (data[23] << 10); + avg_lum += (data[36] & 3) | + (data[26] << 2) | + (data[25] << 10); + avg_lum += ((data[36] >> 2) & 3) | + (data[28] << 2) | + (data[27] << 10); + avg_lum += ((data[36] >> 4) & 3) | + (data[30] << 2) | + (data[29] << 10); + avg_lum += ((data[36] >> 6) & 3) | + (data[32] << 2) | + (data[31] << 10); + avg_lum += ((data[44] >> 4) & 3) | + (data[34] << 2) | + (data[33] << 10); + avg_lum >>= 9; + atomic_set(&sd->avg_lum, avg_lum); + gspca_frame_add(gspca_dev, LAST_PACKET, + frame, data, len); + return; + } + if (gspca_dev->last_packet_type == LAST_PACKET) { + if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv + & MODE_JPEG) { + gspca_frame_add(gspca_dev, FIRST_PACKET, frame, + sd->jpeg_hdr, JPEG_HDR_SZ); + gspca_frame_add(gspca_dev, INTER_PACKET, frame, + data, len); + } else { + gspca_frame_add(gspca_dev, FIRST_PACKET, frame, + data, len); + } + } else { + gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); + } +} + +/* sub-driver description */ +static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), + .config = sd_config, + .init = sd_init, + .start = sd_start, + .stopN = sd_stopN, + .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, + .dq_callback = do_autoexposure, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .set_register = sd_dbg_s_register, + .get_register = sd_dbg_g_register, +#endif + .get_chip_ident = sd_chip_ident, +}; + +#define SN9C20X(sensor, i2c_addr, button_mask) \ + .driver_info = (button_mask << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (i2c_addr) + +static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)}, + {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x6270), SN9C20X(MT9VPRB, 0x00, 0)}, + {USB_DEVICE(0x0c45, 0x627b), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x627c), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)}, + {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0617), 
SN9C20X(MT9M111, 0x5d, 0)}, + {} +}; +MODULE_DEVICE_TABLE(usb, device_table); + +/* -- device connect -- */ +static int sd_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), + THIS_MODULE); +} + +static void sd_disconnect(struct usb_interface *intf) +{ +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + struct gspca_dev *gspca_dev = usb_get_intfdata(intf); + + sn9c20x_input_cleanup(gspca_dev); +#endif + + gspca_disconnect(intf); +} + +static struct usb_driver sd_driver = { + .name = MODULE_NAME, + .id_table = device_table, + .probe = sd_probe, + .disconnect = sd_disconnect, +#ifdef CONFIG_PM + .suspend = gspca_suspend, + .resume = gspca_resume, + .reset_resume = gspca_resume, +#endif +}; + +/* -- module insert / remove -- */ +static int __init sd_mod_init(void) +{ + int ret; + ret = usb_register(&sd_driver); + if (ret < 0) + return ret; + info("registered"); + return 0; +} +static void __exit sd_mod_exit(void) +{ + usb_deregister(&sd_driver); + info("deregistered"); +} + +module_init(sd_mod_init); +module_exit(sd_mod_exit); diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 0d02f41fa7d..d6332ab8066 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -1634,6 +1634,8 @@ static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; + if (gspca_dev->ctrl_dis & (1 << FREQ_IDX)) + return; if (sd->sensor == SENSOR_OV7660) { switch (sd->freq) { case 0: /* Banding filter disabled */ @@ -1735,6 +1737,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c index 8806b2ff82b..fab7ef85a6c 100644 --- a/drivers/media/video/gspca/spca500.c +++ b/drivers/media/video/gspca/spca500.c @@ -670,6 +670,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c index f25be20cf1a..47628964801 100644 --- a/drivers/media/video/gspca/stk014.c +++ b/drivers/media/video/gspca/stk014.c @@ -333,6 +333,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h index 9df7137fe67..992ce530f13 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx.h +++ b/drivers/media/video/gspca/stv06xx/stv06xx.h @@ -36,10 +36,6 @@ #define STV_ISOC_ENDPOINT_ADDR 0x81 -#ifndef V4L2_PIX_FMT_SGRBG8 -#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') -#endif - #define STV_REG23 0x0423 /* Control registers of the STV0600 ASIC */ diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c index 3039ec208f3..e5024c8496e 100644 --- 
a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c @@ -64,7 +64,7 @@ static struct v4l2_pix_format hdcs1x00_mode[] = { { HDCS_1X00_DEF_WIDTH, HDCS_1X00_DEF_HEIGHT, - V4L2_PIX_FMT_SBGGR8, + V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT, @@ -80,7 +80,7 @@ static struct v4l2_pix_format hdcs1020_mode[] = { { HDCS_1020_DEF_WIDTH, HDCS_1020_DEF_HEIGHT, - V4L2_PIX_FMT_SBGGR8, + V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT, @@ -131,9 +131,11 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len) (reg + len > 0xff))) return -EINVAL; - for (i = 0; i < len; i++, reg++) { - regs[2*i] = reg; - regs[2*i+1] = vals[i]; + for (i = 0; i < len; i++) { + regs[2 * i] = reg; + regs[2 * i + 1] = vals[i]; + /* All addresses are shifted left one bit as bit 0 toggles r/w */ + reg += 2; } return stv06xx_write_sensor_bytes(sd, regs, len); @@ -174,7 +176,9 @@ static int hdcs_set_state(struct sd *sd, enum hdcs_power_state state) } ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val); - if (ret < 0) + + /* Update the state if the write succeeded */ + if (!ret) hdcs->state = state; return ret; diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c index 9623f294bda..5127bbf9dd2 100644 --- a/drivers/media/video/gspca/sunplus.c +++ b/drivers/media/video/gspca/sunplus.c @@ -973,6 +973,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index 08422d315e6..3d2756f7874 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c @@ -7243,6 +7243,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index ccd47f57f42..d678765cbba 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -1220,6 +1220,8 @@ static const struct video_device hdpvr_video_template = { V4L2_STD_PAL_G | V4L2_STD_PAL_H | V4L2_STD_PAL_I | V4L2_STD_PAL_D | V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_60, + .current_norm = V4L2_STD_NTSC | V4L2_STD_PAL_M | + V4L2_STD_PAL_60, }; int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c index a3b77ed3f08..4a9c8ce0ecb 100644 --- a/drivers/media/video/ivtv/ivtv-controls.c +++ b/drivers/media/video/ivtv/ivtv-controls.c @@ -17,6 +17,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/kernel.h> #include "ivtv-driver.h" #include "ivtv-cards.h" @@ -281,7 +282,7 @@ int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c) idx = p.audio_properties & 0x03; /* The audio clock of the digitizer must match the codec sample rate otherwise you get some very 
strange effects. */ - if (idx < sizeof(freqs)) + if (idx < ARRAY_SIZE(freqs)) ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]); return err; } diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c new file mode 100644 index 00000000000..cc85f77a570 --- /dev/null +++ b/drivers/media/video/mt9v011.c @@ -0,0 +1,634 @@ +/* + * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor + * + * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com) + * This code is placed under the terms of the GNU General Public License v2 + */ + +#include <linux/i2c.h> +#include <linux/videodev2.h> +#include <linux/delay.h> +#include <asm/div64.h> +#include <media/v4l2-device.h> +#include "mt9v011.h" +#include <media/v4l2-i2c-drv.h> +#include <media/v4l2-chip-ident.h> + +MODULE_DESCRIPTION("Micron mt9v011 sensor driver"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); +MODULE_LICENSE("GPL"); + + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0-2)"); + +/* supported controls */ +static struct v4l2_queryctrl mt9v011_qctrl[] = { + { + .id = V4L2_CID_GAIN, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Gain", + .minimum = 0, + .maximum = (1 << 10) - 1, + .step = 1, + .default_value = 0x0020, + .flags = 0, + }, { + .id = V4L2_CID_RED_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Red Balance", + .minimum = -1 << 9, + .maximum = (1 << 9) - 1, + .step = 1, + .default_value = 0, + .flags = 0, + }, { + .id = V4L2_CID_BLUE_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Blue Balance", + .minimum = -1 << 9, + .maximum = (1 << 9) - 1, + .step = 1, + .default_value = 0, + .flags = 0, + }, { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Mirror", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 0, + .flags = 0, + }, { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Vflip", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 0, + .flags = 0, + }, { + } +}; + +struct mt9v011 { + struct v4l2_subdev sd; + unsigned width, height; + unsigned xtal; + unsigned hflip:1; + unsigned vflip:1; + + u16 global_gain, red_bal, blue_bal; +}; + +static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd) +{ + return container_of(sd, struct mt9v011, sd); +} + +static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr) +{ + struct i2c_client *c = v4l2_get_subdevdata(sd); + __be16 buffer; + int rc, val; + + rc = i2c_master_send(c, &addr, 1); + if (rc != 1) + v4l2_dbg(0, debug, sd, + "i2c i/o error: rc == %d (should be 1)\n", rc); + + msleep(10); + + rc = i2c_master_recv(c, (char *)&buffer, 2); + if (rc != 2) + v4l2_dbg(0, debug, sd, + "i2c i/o error: rc == %d (should be 2)\n", rc); + + val = be16_to_cpu(buffer); + + v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val); + + return val; +} + +static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr, + u16 value) +{ + struct i2c_client *c = v4l2_get_subdevdata(sd); + unsigned char buffer[3]; + int rc; + + buffer[0] = addr; + buffer[1] = value >> 8; + buffer[2] = value & 0xff; + + v4l2_dbg(2, debug, sd, + "mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value); + rc = i2c_master_send(c, buffer, 3); + if (rc != 3) + v4l2_dbg(0, debug, sd, + "i2c i/o error: rc == %d (should be 3)\n", rc); +} + + +struct i2c_reg_value { + unsigned char reg; + u16 value; +}; + +/* + * Values used at the original driver + * Some values are marked as Reserved at the datasheet + */ +static const struct i2c_reg_value 
mt9v011_init_default[] = { + { R0D_MT9V011_RESET, 0x0001 }, + { R0D_MT9V011_RESET, 0x0000 }, + + { R0C_MT9V011_SHUTTER_DELAY, 0x0000 }, + { R09_MT9V011_SHUTTER_WIDTH, 0x1fc }, + + { R0A_MT9V011_CLK_SPEED, 0x0000 }, + { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, + + { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ +}; + +static void set_balance(struct v4l2_subdev *sd) +{ + struct mt9v011 *core = to_mt9v011(sd); + u16 green1_gain, green2_gain, blue_gain, red_gain; + + green1_gain = core->global_gain; + green2_gain = core->global_gain; + + blue_gain = core->global_gain + + core->global_gain * core->blue_bal / (1 << 9); + + red_gain = core->global_gain + + core->global_gain * core->red_bal / (1 << 9); + + mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain); + mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green2_gain); + mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain); + mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); +} + +static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned height, width, hblank, vblank, speed; + unsigned row_time, t_time; + u64 frames_per_ms; + unsigned tmp; + + height = mt9v011_read(sd, R03_MT9V011_HEIGHT); + width = mt9v011_read(sd, R04_MT9V011_WIDTH); + hblank = mt9v011_read(sd, R05_MT9V011_HBLANK); + vblank = mt9v011_read(sd, R06_MT9V011_VBLANK); + speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED); + + row_time = (width + 113 + hblank) * (speed + 2); + t_time = row_time * (height + vblank + 1); + + frames_per_ms = core->xtal * 1000l; + do_div(frames_per_ms, t_time); + tmp = frames_per_ms; + + v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clocks)\n", + tmp / 1000, tmp % 1000, t_time); + + if (numerator && denominator) { + *numerator = 1000; + *denominator = (u32)frames_per_ms; + } +} + +static u16 calc_speed(struct v4l2_subdev *sd, u32 numerator, u32 denominator) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned height, width, hblank, vblank; + unsigned row_time, line_time; + u64 t_time, speed; + + /* Avoid a bogus calculation */ + if (!numerator || !denominator) + return 0; + + height = mt9v011_read(sd, R03_MT9V011_HEIGHT); + width = mt9v011_read(sd, R04_MT9V011_WIDTH); + hblank = mt9v011_read(sd, R05_MT9V011_HBLANK); + vblank = mt9v011_read(sd, R06_MT9V011_VBLANK); + + row_time = width + 113 + hblank; + line_time = height + vblank + 1; + + t_time = core->xtal * ((u64)numerator); + /* round to the closest value */ + t_time += denominator / 2; + do_div(t_time, denominator); + + speed = t_time; + do_div(speed, row_time * line_time); + + /* Avoid having a negative value for speed */ + if (speed < 2) + speed = 0; + else + speed -= 2; + + /* Avoid speed overflow */ + if (speed > 15) + return 15; + + return (u16)speed; +} + +static void set_res(struct v4l2_subdev *sd) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned vstart, hstart; + + /* + * The mt9v011 doesn't have scaling. So, in order to select the desired + * resolution, we're cropping at the middle of the sensor. + * hblank and vblank should be adjusted, in order to guarantee that + * we'll preserve the line timings for 30 fps, no matter what resolution + * is selected. + * NOTE: datasheet says that width (and height) should be filled with + * width-1. However, this doesn't work, since one pixel per line will + * be missing.
+ */ + + hstart = 14 + (640 - core->width) / 2; + mt9v011_write(sd, R02_MT9V011_COLSTART, hstart); + mt9v011_write(sd, R04_MT9V011_WIDTH, core->width); + mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width); + + vstart = 8 + (480 - core->height) / 2; + mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart); + mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); + mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); + + calc_fps(sd, NULL, NULL); +}; + +static void set_read_mode(struct v4l2_subdev *sd) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned mode = 0x1000; + + if (core->hflip) + mode |= 0x4000; + + if (core->vflip) + mode |= 0x8000; + + mt9v011_write(sd, R20_MT9V011_READ_MODE, mode); +} + +static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++) + mt9v011_write(sd, mt9v011_init_default[i].reg, + mt9v011_init_default[i].value); + + set_balance(sd); + set_res(sd); + set_read_mode(sd); + + return 0; +}; + +static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) +{ + struct mt9v011 *core = to_mt9v011(sd); + + v4l2_dbg(1, debug, sd, "g_ctrl called\n"); + + switch (ctrl->id) { + case V4L2_CID_GAIN: + ctrl->value = core->global_gain; + return 0; + case V4L2_CID_RED_BALANCE: + ctrl->value = core->red_bal; + return 0; + case V4L2_CID_BLUE_BALANCE: + ctrl->value = core->blue_bal; + return 0; + case V4L2_CID_HFLIP: + ctrl->value = core->hflip ? 1 : 0; + return 0; + case V4L2_CID_VFLIP: + ctrl->value = core->vflip ? 1 : 0; + return 0; + } + return -EINVAL; +} + +static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) +{ + int i; + + v4l2_dbg(1, debug, sd, "queryctrl called\n"); + + for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++) + if (qc->id && qc->id == mt9v011_qctrl[i].id) { + memcpy(qc, &(mt9v011_qctrl[i]), + sizeof(*qc)); + return 0; + } + + return -EINVAL; +} + + +static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) +{ + struct mt9v011 *core = to_mt9v011(sd); + u8 i, n; + n = ARRAY_SIZE(mt9v011_qctrl); + + for (i = 0; i < n; i++) { + if (ctrl->id != mt9v011_qctrl[i].id) + continue; + if (ctrl->value < mt9v011_qctrl[i].minimum || + ctrl->value > mt9v011_qctrl[i].maximum) + return -ERANGE; + v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n", + ctrl->id, ctrl->value); + break; + } + + switch (ctrl->id) { + case V4L2_CID_GAIN: + core->global_gain = ctrl->value; + break; + case V4L2_CID_RED_BALANCE: + core->red_bal = ctrl->value; + break; + case V4L2_CID_BLUE_BALANCE: + core->blue_bal = ctrl->value; + break; + case V4L2_CID_HFLIP: + core->hflip = ctrl->value; + set_read_mode(sd); + return 0; + case V4L2_CID_VFLIP: + core->vflip = ctrl->value; + set_read_mode(sd); + return 0; + default: + return -EINVAL; + } + + set_balance(sd); + + return 0; +} + +static int mt9v011_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt) +{ + if (fmt->index > 0) + return -EINVAL; + + fmt->flags = 0; + strcpy(fmt->description, "8 bpp Bayer GRGR..BGBG"); + fmt->pixelformat = V4L2_PIX_FMT_SGRBG8; + + return 0; +} + +static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) +{ + struct v4l2_pix_format *pix = &fmt->fmt.pix; + + if (pix->pixelformat != V4L2_PIX_FMT_SGRBG8) + return -EINVAL; + + v4l_bound_align_image(&pix->width, 48, 639, 1, + &pix->height, 32, 480, 1, 0); + + return 0; +} + +static int mt9v011_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) +{ + struct v4l2_captureparm *cp = &parms->parm.capture; + + if (parms->type 
!= V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + memset(cp, 0, sizeof(struct v4l2_captureparm)); + cp->capability = V4L2_CAP_TIMEPERFRAME; + calc_fps(sd, + &cp->timeperframe.numerator, + &cp->timeperframe.denominator); + + return 0; +} + +static int mt9v011_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) +{ + struct v4l2_captureparm *cp = &parms->parm.capture; + struct v4l2_fract *tpf = &cp->timeperframe; + u16 speed; + + if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + if (cp->extendedmode != 0) + return -EINVAL; + + speed = calc_speed(sd, tpf->numerator, tpf->denominator); + + mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed); + v4l2_dbg(1, debug, sd, "Setting speed to %d\n", speed); + + /* Recalculate and update fps info */ + calc_fps(sd, &tpf->numerator, &tpf->denominator); + + return 0; +} + +static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) +{ + struct v4l2_pix_format *pix = &fmt->fmt.pix; + struct mt9v011 *core = to_mt9v011(sd); + int rc; + + rc = mt9v011_try_fmt(sd, fmt); + if (rc < 0) + return -EINVAL; + + core->width = pix->width; + core->height = pix->height; + + set_res(sd); + + return 0; +} + +static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned *xtal = data; + + v4l2_dbg(1, debug, sd, "s_config called\n"); + + if (xtal) { + core->xtal = *xtal; + v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n", + *xtal / 1000000, (*xtal / 1000) % 1000); + } + + return 0; +} + + +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int mt9v011_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + if (!v4l2_chip_match_i2c_client(client, &reg->match)) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + reg->val = mt9v011_read(sd, reg->reg & 0xff); + reg->size = 2; + + return 0; +} + +static int mt9v011_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + if (!v4l2_chip_match_i2c_client(client, &reg->match)) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff); + + return 0; +} +#endif + +static int mt9v011_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *chip) +{ + u16 version; + struct i2c_client *client = v4l2_get_subdevdata(sd); + + version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); + + return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011, + version); +} + +static const struct v4l2_subdev_core_ops mt9v011_core_ops = { + .queryctrl = mt9v011_queryctrl, + .g_ctrl = mt9v011_g_ctrl, + .s_ctrl = mt9v011_s_ctrl, + .reset = mt9v011_reset, + .s_config = mt9v011_s_config, + .g_chip_ident = mt9v011_g_chip_ident, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = mt9v011_g_register, + .s_register = mt9v011_s_register, +#endif +}; + +static const struct v4l2_subdev_video_ops mt9v011_video_ops = { + .enum_fmt = mt9v011_enum_fmt, + .try_fmt = mt9v011_try_fmt, + .s_fmt = mt9v011_s_fmt, + .g_parm = mt9v011_g_parm, + .s_parm = mt9v011_s_parm, +}; + +static const struct v4l2_subdev_ops mt9v011_ops = { + .core = &mt9v011_core_ops, + .video = &mt9v011_video_ops, +}; + + +/**************************************************************************** + I2C Client & Driver + ****************************************************************************/ + +static int mt9v011_probe(struct i2c_client *c, + const struct i2c_device_id *id)
+{ + u16 version; + struct mt9v011 *core; + struct v4l2_subdev *sd; + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(c->adapter, + I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) + return -EIO; + + core = kzalloc(sizeof(struct mt9v011), GFP_KERNEL); + if (!core) + return -ENOMEM; + + sd = &core->sd; + v4l2_i2c_subdev_init(sd, c, &mt9v011_ops); + + /* Check if the sensor is really a MT9V011 */ + version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); + if ((version != MT9V011_VERSION) && + (version != MT9V011_REV_B_VERSION)) { + v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n", + version); + kfree(core); + return -EINVAL; + } + + core->global_gain = 0x0024; + core->width = 640; + core->height = 480; + core->xtal = 27000000; /* Hz */ + + v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n", + c->addr << 1, c->adapter->name, version); + + return 0; +} + +static int mt9v011_remove(struct i2c_client *c) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(c); + + v4l2_dbg(1, debug, sd, + "mt9v011.c: removing mt9v011 adapter on address 0x%x\n", + c->addr << 1); + + v4l2_device_unregister_subdev(sd); + kfree(to_mt9v011(sd)); + return 0; +} + +/* ----------------------------------------------------------------------- */ + +static const struct i2c_device_id mt9v011_id[] = { + { "mt9v011", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mt9v011_id); + +static struct v4l2_i2c_driver_data v4l2_i2c_data = { + .name = "mt9v011", + .probe = mt9v011_probe, + .remove = mt9v011_remove, + .id_table = mt9v011_id, +}; diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h new file mode 100644 index 00000000000..3350fd6083c --- /dev/null +++ b/drivers/media/video/mt9v011.h @@ -0,0 +1,36 @@ +/* + * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor + * + * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com) + * This code is placed under the terms of the GNU General Public License v2 + */ + +#ifndef MT9V011_H_ +#define MT9V011_H_ + +#define R00_MT9V011_CHIP_VERSION 0x00 +#define R01_MT9V011_ROWSTART 0x01 +#define R02_MT9V011_COLSTART 0x02 +#define R03_MT9V011_HEIGHT 0x03 +#define R04_MT9V011_WIDTH 0x04 +#define R05_MT9V011_HBLANK 0x05 +#define R06_MT9V011_VBLANK 0x06 +#define R07_MT9V011_OUT_CTRL 0x07 +#define R09_MT9V011_SHUTTER_WIDTH 0x09 +#define R0A_MT9V011_CLK_SPEED 0x0a +#define R0B_MT9V011_RESTART 0x0b +#define R0C_MT9V011_SHUTTER_DELAY 0x0c +#define R0D_MT9V011_RESET 0x0d +#define R1E_MT9V011_DIGITAL_ZOOM 0x1e +#define R20_MT9V011_READ_MODE 0x20 +#define R2B_MT9V011_GREEN_1_GAIN 0x2b +#define R2C_MT9V011_BLUE_GAIN 0x2c +#define R2D_MT9V011_RED_GAIN 0x2d +#define R2E_MT9V011_GREEN_2_GAIN 0x2e +#define R35_MT9V011_GLOBAL_GAIN 0x35 +#define RF1_MT9V011_CHIP_ENABLE 0xf1 + +#define MT9V011_VERSION 0x8232 +#define MT9V011_REV_B_VERSION 0x8243 + +#endif diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 2d075205bdf..736c31d2319 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -234,6 +234,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev) return ret; } +/* Called under spinlock_irqsave(&pcdev->lock, ...) 
*/ static void mx1_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { @@ -241,13 +242,10 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx1_camera_dev *pcdev = ici->priv; struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); - unsigned long flags; dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); - spin_lock_irqsave(&pcdev->lock, flags); - list_add_tail(&vb->queue, &pcdev->capture); vb->state = VIDEOBUF_ACTIVE; @@ -264,8 +262,6 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq, __raw_writel(temp, pcdev->base + CSICR1); } } - - spin_unlock_irqrestore(&pcdev->lock, flags); } static void mx1_videobuf_release(struct videobuf_queue *vq, diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index e605c076ed8..9770cb7932c 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -332,7 +332,10 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc) } } -/* Called with .vb_lock held */ +/* + * Called with .vb_lock mutex held and + * under spinlock_irqsave(&mx3_cam->lock, ...) + */ static void mx3_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { @@ -346,7 +349,8 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, struct idmac_video_param *video = &ichan->params.video; const struct soc_camera_data_format *data_fmt = icd->current_fmt; dma_cookie_t cookie; - unsigned long flags; + + BUG_ON(!irqs_disabled()); /* This is the configuration of one sg-element */ video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); @@ -359,8 +363,6 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, memset((void *)vb->baddr, 0xaa, vb->bsize); #endif - spin_lock_irqsave(&mx3_cam->lock, flags); - list_add_tail(&vb->queue, &mx3_cam->capture); if (!mx3_cam->active) { @@ -370,24 +372,23 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, vb->state = VIDEOBUF_QUEUED; } - spin_unlock_irqrestore(&mx3_cam->lock, flags); + spin_unlock_irq(&mx3_cam->lock); cookie = txd->tx_submit(txd); dev_dbg(&icd->dev, "Submitted cookie %d DMA 0x%08x\n", cookie, sg_dma_address(&buf->sg)); + + spin_lock_irq(&mx3_cam->lock); + if (cookie >= 0) return; /* Submit error */ vb->state = VIDEOBUF_PREPARED; - spin_lock_irqsave(&mx3_cam->lock, flags); - list_del_init(&vb->queue); if (mx3_cam->active == buf) mx3_cam->active = NULL; - - spin_unlock_irqrestore(&mx3_cam->lock, flags); } /* Called with .vb_lock held */ diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c index db25c3034c1..8d17cf61330 100644 --- a/drivers/media/video/pwc/pwc-if.c +++ b/drivers/media/video/pwc/pwc-if.c @@ -62,6 +62,7 @@ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #ifdef CONFIG_USB_PWC_INPUT_EVDEV #include <linux/usb/input.h> #endif diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h index 0be6f814f53..0b658dee05a 100644 --- a/drivers/media/video/pwc/pwc.h +++ b/drivers/media/video/pwc/pwc.h @@ -29,7 +29,6 @@ #include <linux/usb.h> #include <linux/spinlock.h> #include <linux/wait.h> -#include <linux/smp_lock.h> #include <linux/version.h> #include <linux/mutex.h> #include <linux/mm.h> diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 46e0d8ad880..016bb45ba0c 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -612,6 +612,7 @@ 
static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev) dev_dbg(pcdev->soc_host.dev, "%s\n", __func__); } +/* Called under spinlock_irqsave(&pcdev->lock, ...) */ static void pxa_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { @@ -619,13 +620,10 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); - unsigned long flags; dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__, vb, vb->baddr, vb->bsize, pcdev->active); - spin_lock_irqsave(&pcdev->lock, flags); - list_add_tail(&vb->queue, &pcdev->capture); vb->state = VIDEOBUF_ACTIVE; @@ -633,8 +631,6 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq, if (!pcdev->active) pxa_camera_start_capture(pcdev); - - spin_unlock_irqrestore(&pcdev->lock, flags); } static void pxa_videobuf_release(struct videobuf_queue *vq, @@ -1579,6 +1575,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->mclk = 20000000; } + pcdev->soc_host.dev = &pdev->dev; pcdev->mclk_divisor = mclk_get_divisor(pcdev); INIT_LIST_HEAD(&pcdev->capture); @@ -1644,7 +1641,6 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; pcdev->soc_host.ops = &pxa_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; - pcdev->soc_host.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index 6be845ccc7d..9e3262c0ba3 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c @@ -48,6 +48,7 @@ #include <linux/videodev2.h> #include <linux/version.h> #include <linux/mm.h> +#include <linux/smp_lock.h> #include <media/videobuf-vmalloc.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c index 155804b061e..b624a4c01fd 100644 --- a/drivers/media/video/saa5246a.c +++ b/drivers/media/video/saa5246a.c @@ -43,7 +43,6 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/i2c.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/videotext.h> #include <linux/videodev2.h> diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c index 271d6e931b7..12835fb82c9 100644 --- a/drivers/media/video/saa5249.c +++ b/drivers/media/video/saa5249.c @@ -46,7 +46,6 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/i2c.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/videotext.h> diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 06861b782b9..6eebe3ef97d 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -3331,8 +3331,8 @@ struct saa7134_board saa7134_boards[] = { .gpio = 0x0200100, }, }, - [SAA7134_BOARD_HAUPPAUGE_HVR1120] = { - .name = "Hauppauge WinTV-HVR1120 ATSC/QAM-Hybrid", + [SAA7134_BOARD_HAUPPAUGE_HVR1150] = { + .name = "Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, @@ -3363,8 +3363,8 @@ struct saa7134_board saa7134_boards[] = { .gpio = 0x0800100, /* GPIO 23 HI for FM */ }, }, - [SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = { - .name = "Hauppauge WinTV-HVR1110r3 
DVB-T/Hybrid", + [SAA7134_BOARD_HAUPPAUGE_HVR1120] = { + .name = "Hauppauge WinTV-HVR1120 DVB-T/Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, @@ -5862,31 +5862,31 @@ struct pci_device_id saa7134_pci_tbl[] = { .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6706, - .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, + .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6707, - .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, + .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6708, - .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, + .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6709, - .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, + .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x670a, - .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3, + .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, @@ -6363,8 +6363,8 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev, switch (command) { case TDA18271_CALLBACK_CMD_AGC_ENABLE: /* 0 */ switch (dev->board) { + case SAA7134_BOARD_HAUPPAUGE_HVR1150: case SAA7134_BOARD_HAUPPAUGE_HVR1120: - case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: ret = saa7134_tda18271_hvr11x0_toggle_agc(dev, arg); break; default: @@ -6384,8 +6384,8 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev, int ret; switch (dev->board) { + case SAA7134_BOARD_HAUPPAUGE_HVR1150: case SAA7134_BOARD_HAUPPAUGE_HVR1120: - case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: /* tda8290 + tda18271 */ ret = saa7134_tda8290_18271_callback(dev, command, arg); break; @@ -6427,7 +6427,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data) switch (tv.model) { case 67019: /* WinTV-HVR1110 (Retail, IR Blaster, hybrid, FM, SVid/Comp, 3.5mm audio in) */ case 67109: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ - case 67201: /* WinTV-HVR1120 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ + case 67201: /* WinTV-HVR1150 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ case 67301: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */ case 67209: /* WinTV-HVR1110 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */ case 67559: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ @@ -6435,7 +6435,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data) case 67579: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM) */ case 67589: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ case 67599: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */ - case 67651: /* WinTV-HVR1120 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ + case 67651: /* WinTV-HVR1150 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */ break; default: @@ -6625,8 +6625,8 @@ int saa7134_board_init1(struct saa7134_dev *dev) saa_writeb (SAA7134_PRODUCTION_TEST_MODE, 0x00); break; + case 
SAA7134_BOARD_HAUPPAUGE_HVR1150: case SAA7134_BOARD_HAUPPAUGE_HVR1120: - case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: /* GPIO 26 high for digital, low for analog */ saa7134_set_gpio(dev, 26, 0); msleep(1); @@ -6891,8 +6891,8 @@ int saa7134_board_init2(struct saa7134_dev *dev) dev->name, saa7134_boards[dev->board].name); } break; + case SAA7134_BOARD_HAUPPAUGE_HVR1150: case SAA7134_BOARD_HAUPPAUGE_HVR1120: - case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: hauppauge_eeprom(dev, dev->eedata+0x80); break; case SAA7134_BOARD_HAUPPAUGE_HVR1110: diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c index 31930f26ffc..98f3efd1e94 100644 --- a/drivers/media/video/saa7134/saa7134-dvb.c +++ b/drivers/media/video/saa7134/saa7134-dvb.c @@ -1119,7 +1119,7 @@ static int dvb_init(struct saa7134_dev *dev) &tda827x_cfg_2) < 0) goto dettach_frontend; break; - case SAA7134_BOARD_HAUPPAUGE_HVR1110R3: + case SAA7134_BOARD_HAUPPAUGE_HVR1120: fe0->dvb.frontend = dvb_attach(tda10048_attach, &hcw_tda10048_config, &dev->i2c_adap); @@ -1147,7 +1147,7 @@ static int dvb_init(struct saa7134_dev *dev) &tda827x_cfg_1) < 0) goto dettach_frontend; break; - case SAA7134_BOARD_HAUPPAUGE_HVR1120: + case SAA7134_BOARD_HAUPPAUGE_HVR1150: fe0->dvb.frontend = dvb_attach(lgdt3305_attach, &hcw_lgdt3305_config, &dev->i2c_adap); diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index add1757f893..296788c3bf0 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/delay.h> #include "saa7134-reg.h" diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h index 82268848f26..fb564f14887 100644 --- a/drivers/media/video/saa7134/saa7134.h +++ b/drivers/media/video/saa7134/saa7134.h @@ -278,8 +278,8 @@ struct saa7134_format { #define SAA7134_BOARD_ASUSTeK_TIGER 152 #define SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG 153 #define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154 -#define SAA7134_BOARD_HAUPPAUGE_HVR1120 155 -#define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156 +#define SAA7134_BOARD_HAUPPAUGE_HVR1150 155 +#define SAA7134_BOARD_HAUPPAUGE_HVR1120 156 #define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157 #define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158 #define SAA7134_BOARD_BEHOLD_505RDS 159 diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c index c8f05297d0f..85ffc2cba03 100644 --- a/drivers/media/video/se401.c +++ b/drivers/media/video/se401.c @@ -31,6 +31,7 @@ static const char version[] = "0.24"; #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/pagemap.h> #include <linux/usb.h> #include "se401.h" diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 0db88a53d92..e86878deea7 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -282,27 +282,24 @@ out: return ret; } +/* Called under spinlock_irqsave(&pcdev->lock, ...) 
*/ static void sh_mobile_ceu_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; - unsigned long flags; dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, vb, vb->baddr, vb->bsize); vb->state = VIDEOBUF_QUEUED; - spin_lock_irqsave(&pcdev->lock, flags); list_add_tail(&vb->queue, &pcdev->capture); if (!pcdev->active) { pcdev->active = vb; sh_mobile_ceu_capture(pcdev); } - - spin_unlock_irqrestore(&pcdev->lock, flags); } static void sh_mobile_ceu_videobuf_release(struct videobuf_queue *vq, diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 16f595d4337..9f5ae816785 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -237,11 +237,11 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd) return -ENOMEM; icd->num_user_formats = fmts; - fmts = 0; dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts); /* Second pass - actually fill data formats */ + fmts = 0; for (i = 0; i < icd->num_formats; i++) if (!ici->ops->get_formats) { icd->user_formats[i].host_fmt = icd->formats + i; @@ -877,8 +877,11 @@ static int soc_camera_probe(struct device *dev) (unsigned short)~0; ret = soc_camera_init_user_formats(icd); - if (ret < 0) + if (ret < 0) { + if (icd->ops->remove) + icd->ops->remove(icd); goto eiufmt; + } icd->height = DEFAULT_HEIGHT; icd->width = DEFAULT_WIDTH; @@ -902,8 +905,10 @@ static int soc_camera_remove(struct device *dev) { struct soc_camera_device *icd = to_soc_camera_dev(dev); + mutex_lock(&icd->video_lock); if (icd->ops->remove) icd->ops->remove(icd); + mutex_unlock(&icd->video_lock); soc_camera_free_user_formats(icd); @@ -1145,6 +1150,7 @@ evidallocd: } EXPORT_SYMBOL(soc_camera_video_start); +/* Called from client .remove() methods with .video_lock held */ void soc_camera_video_stop(struct soc_camera_device *icd) { struct video_device *vdev = icd->vdev; @@ -1154,10 +1160,8 @@ void soc_camera_video_stop(struct soc_camera_device *icd) if (!icd->dev.parent || !vdev) return; - mutex_lock(&icd->video_lock); video_unregister_device(vdev); icd->vdev = NULL; - mutex_unlock(&icd->video_lock); } EXPORT_SYMBOL(soc_camera_video_stop); diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c index 2e593704727..b154bd961e3 100644 --- a/drivers/media/video/stk-webcam.c +++ b/drivers/media/video/stk-webcam.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/usb.h> #include <linux/mm.h> @@ -1049,8 +1050,8 @@ static int stk_setup_format(struct stk_camera *dev) depth = 1; else depth = 2; - while (stk_sizes[i].m != dev->vsettings.mode - && i < ARRAY_SIZE(stk_sizes)) + while (i < ARRAY_SIZE(stk_sizes) && + stk_sizes[i].m != dev->vsettings.mode) i++; if (i == ARRAY_SIZE(stk_sizes)) { STK_ERROR("Something is broken in %s\n", __func__); diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c index 0eb313082c9..eaada39c76f 100644 --- a/drivers/media/video/stradis.c +++ b/drivers/media/video/stradis.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/major.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/poll.h> diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c index 75f286f7a2e..8b4e7dafce7 100644 --- 
a/drivers/media/video/stv680.c +++ b/drivers/media/video/stv680.c @@ -62,6 +62,7 @@ #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/pagemap.h> #include <linux/errno.h> #include <linux/videodev.h> diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c index 8d73979596f..45fce39ec9a 100644 --- a/drivers/media/video/usbvideo/vicam.c +++ b/drivers/media/video/usbvideo/vicam.c @@ -43,6 +43,7 @@ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <linux/ihex.h> diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c index 90b58914f98..90d9b5c0e9a 100644 --- a/drivers/media/video/usbvision/usbvision-video.c +++ b/drivers/media/video/usbvision/usbvision-video.c @@ -50,6 +50,7 @@ #include <linux/list.h> #include <linux/timer.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/utsname.h> #include <linux/highmem.h> diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 89927b7aec2..04b47832fa0 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1845,11 +1845,29 @@ static struct usb_device_id uvc_ids[] = { .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_QUIRK_STREAM_NO_FID }, - /* ViMicro */ - { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + /* ViMicro Vega */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x0ac8, + .idProduct = 0x332d, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_FIX_BANDWIDTH }, + /* ViMicro - Minoru3D */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x0ac8, + .idProduct = 0x3410, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_FIX_BANDWIDTH }, + /* ViMicro Venus - Minoru3D */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, - .idProduct = 0x0000, + .idProduct = 0x3420, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c index f152a990386..1ca6dff7361 100644 --- a/drivers/media/video/uvc/uvc_status.c +++ b/drivers/media/video/uvc/uvc_status.c @@ -145,8 +145,8 @@ static void uvc_status_complete(struct urb *urb) break; default: - uvc_printk(KERN_INFO, "unknown event type %u.\n", - dev->status[0]); + uvc_trace(UVC_TRACE_STATUS, "Unknown status event " + "type %u.\n", dev->status[0]); break; } } diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 31eac66411d..a7f1b69a7da 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -25,7 +25,6 @@ #include <linux/init.h> #include <linux/kmod.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/system.h> diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index be64a502ea2..f2afc4e0837 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -1081,8 +1081,10 @@ static long __video_do_ioctl(struct file *file, /* Calls the specific handler */ if (ops->vidioc_g_std) ret = 
ops->vidioc_g_std(file, fh, id); - else + else if (vfd->current_norm) *id = vfd->current_norm; + else + ret = -EINVAL; if (!ret) dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id); @@ -1553,12 +1555,19 @@ static long __video_do_ioctl(struct file *file, break; ret = ops->vidioc_g_parm(file, fh, p); } else { + v4l2_std_id std = vfd->current_norm; + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; - v4l2_video_std_frame_period(vfd->current_norm, - &p->parm.capture.timeperframe); ret = 0; + if (ops->vidioc_g_std) + ret = ops->vidioc_g_std(file, fh, &std); + else if (std == 0) + ret = -EINVAL; + if (ret == 0) + v4l2_video_std_frame_period(std, + &p->parm.capture.timeperframe); } dbgarg(cmd, "type=%d\n", p->type); diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c index cd726685846..7705fc6baf0 100644 --- a/drivers/media/video/vivi.c +++ b/drivers/media/video/vivi.c @@ -343,6 +343,53 @@ static struct bar_std bars[] = { #define TO_U(r, g, b) \ (((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128) +/* precalculate color bar values to speed up rendering */ +static void precalculate_bars(struct vivi_fh *fh) +{ + struct vivi_dev *dev = fh->dev; + unsigned char r, g, b; + int k, is_yuv; + + fh->input = dev->input; + + for (k = 0; k < 8; k++) { + r = bars[fh->input].bar[k][0]; + g = bars[fh->input].bar[k][1]; + b = bars[fh->input].bar[k][2]; + is_yuv = 0; + + switch (fh->fmt->fourcc) { + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: + is_yuv = 1; + break; + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + r >>= 3; + g >>= 2; + b >>= 3; + break; + case V4L2_PIX_FMT_RGB555: + case V4L2_PIX_FMT_RGB555X: + r >>= 3; + g >>= 3; + b >>= 3; + break; + } + + if (is_yuv) { + fh->bars[k][0] = TO_Y(r, g, b); /* Luma */ + fh->bars[k][1] = TO_U(r, g, b); /* Cb */ + fh->bars[k][2] = TO_V(r, g, b); /* Cr */ + } else { + fh->bars[k][0] = r; + fh->bars[k][1] = g; + fh->bars[k][2] = b; + } + } + +} + #define TSTAMP_MIN_Y 24 #define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15) #define TSTAMP_INPUT_X 10 @@ -755,6 +802,8 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, buf->vb.height = fh->height; buf->vb.field = field; + precalculate_bars(fh); + if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { rc = videobuf_iolock(vq, &buf->vb, NULL); if (rc < 0) @@ -893,53 +942,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, return 0; } -/* precalculate color bar values to speed up rendering */ -static void precalculate_bars(struct vivi_fh *fh) -{ - struct vivi_dev *dev = fh->dev; - unsigned char r, g, b; - int k, is_yuv; - - fh->input = dev->input; - - for (k = 0; k < 8; k++) { - r = bars[fh->input].bar[k][0]; - g = bars[fh->input].bar[k][1]; - b = bars[fh->input].bar[k][2]; - is_yuv = 0; - - switch (fh->fmt->fourcc) { - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_UYVY: - is_yuv = 1; - break; - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_RGB565X: - r >>= 3; - g >>= 2; - b >>= 3; - break; - case V4L2_PIX_FMT_RGB555: - case V4L2_PIX_FMT_RGB555X: - r >>= 3; - g >>= 3; - b >>= 3; - break; - } - - if (is_yuv) { - fh->bars[k][0] = TO_Y(r, g, b); /* Luma */ - fh->bars[k][1] = TO_U(r, g, b); /* Cb */ - fh->bars[k][2] = TO_V(r, g, b); /* Cr */ - } else { - fh->bars[k][0] = r; - fh->bars[k][1] = g; - fh->bars[k][2] = b; - } - } - -} - /*FIXME: This seems to be generic enough to be at videodev2 */ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) @@ -965,8 +967,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 
fh->vb_vidq.field = f->fmt.pix.field; fh->type = f->type; - precalculate_bars(fh); - ret = 0; out: mutex_unlock(&q->vb_lock); @@ -1357,6 +1357,7 @@ static int __init vivi_create_instance(int inst) goto unreg_dev; *vfd = vivi_template; + vfd->debug = debug; ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); if (ret < 0) diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index 3d7df32a3d8..bcdefb1bcb3 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c @@ -49,6 +49,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/wait.h> diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c index fc976f42f43..2622a6e63da 100644 --- a/drivers/media/video/zr364xx.c +++ b/drivers/media/video/zr364xx.c @@ -695,7 +695,7 @@ static int zr364xx_release(struct file *file) for (i = 0; i < 2; i++) { err = send_control_msg(udev, 1, init[cam->method][i].value, - 0, init[i][cam->method].bytes, + 0, init[cam->method][i].bytes, init[cam->method][i].size); if (err < 0) { dev_err(&udev->dev, "error during release sequence\n"); diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c index 7ac12cb0be4..5b6e58a3ba4 100644 --- a/drivers/mfd/dm355evm_msp.c +++ b/drivers/mfd/dm355evm_msp.c @@ -32,8 +32,7 @@ * This driver was tested with firmware revision A4. */ -#if defined(CONFIG_KEYBOARD_DM355EVM) \ - || defined(CONFIG_KEYBOARD_DM355EVM_MODULE) +#if defined(CONFIG_INPUT_DM355EVM) || defined(CONFIG_INPUT_DM355EVM_MODULE) #define msp_has_keyboard() true #else #define msp_has_keyboard() false diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 671a7efe86a..c1de4afa89a 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -238,8 +238,10 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap) mutex_lock(&pcap->adc_mutex); req = pcap->adc_queue[pcap->adc_head]; - if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) + if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) { + mutex_unlock(&pcap->adc_mutex); return IRQ_HANDLED; + } /* read requested channels results */ ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 4c7b7962f6b..0cc5eeff5ee 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -367,7 +367,8 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to) break; default: - return -1; + gate = -1; + goto already; } writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index bae61b22501..7d430835655 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -180,14 +180,9 @@ static struct completion irq_event; static int twl4030_irq_thread(void *data) { long irq = (long)data; - struct irq_desc *desc = irq_to_desc(irq); static unsigned i2c_errors; static const unsigned max_i2c_errors = 100; - if (!desc) { - pr_err("twl4030: Invalid IRQ: %ld\n", irq); - return -EINVAL; - } current->flags |= PF_NOFREEZE; @@ -240,7 +235,7 @@ static int twl4030_irq_thread(void *data) } local_irq_enable(); - desc->chip->unmask(irq); + enable_irq(irq); } return 0; @@ -255,25 +250,13 @@ static int twl4030_irq_thread(void *data) * thread. All we do here is acknowledge and mask the interrupt and wakeup * the kernel thread. 
*/ -static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc) +static irqreturn_t handle_twl4030_pih(int irq, void *devid) { /* Acknowledge, clear *AND* mask the interrupt... */ - desc->chip->ack(irq); - complete(&irq_event); -} - -static struct task_struct *start_twl4030_irq_thread(long irq) -{ - struct task_struct *thread; - - init_completion(&irq_event); - thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq"); - if (!thread) - pr_err("twl4030: could not create irq %ld thread!\n", irq); - - return thread; + disable_irq_nosync(irq); + complete(devid); + return IRQ_HANDLED; } - /*----------------------------------------------------------------------*/ /* @@ -734,18 +717,28 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) } /* install an irq handler to demultiplex the TWL4030 interrupt */ - task = start_twl4030_irq_thread(irq_num); - if (!task) { - pr_err("twl4030: irq thread FAIL\n"); - status = -ESRCH; - goto fail; - } - set_irq_data(irq_num, task); - set_irq_chained_handler(irq_num, handle_twl4030_pih); - return status; + init_completion(&irq_event); + status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED, + "TWL4030-PIH", &irq_event); + if (status < 0) { + pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status); + goto fail_rqirq; + } + + task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq"); + if (IS_ERR(task)) { + pr_err("twl4030: could not create irq %d thread!\n", irq_num); + status = PTR_ERR(task); + goto fail_kthread; + } + return status; +fail_kthread: + free_irq(irq_num, &irq_event); +fail_rqirq: + /* clean up twl4030_sih_setup */ fail: for (i = irq_base; i < irq_end; i++) set_irq_chip_and_handler(i, NULL, NULL); diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c index d38a7acdb6e..d019746551f 100644 --- a/drivers/misc/cb710/sgbuf2.c +++ b/drivers/misc/cb710/sgbuf2.c @@ -114,7 +114,6 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data) if (!left) return; addr += len; - flush_kernel_dcache_page(miter->page); } while (sg_dwiter_next(miter)); } @@ -142,9 +141,6 @@ void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t da return; } else sg_dwiter_write_slow(miter, data); - - if (miter->length == miter->consumed) - flush_kernel_dcache_page(miter->page); } EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index b34cb5f79ee..2e535a0ccd5 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -173,6 +173,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, unsigned segment; unsigned offset = (unsigned) off; u8 *cp = bounce + 1; + int sr; *cp = AT25_WREN; status = spi_write(at25->spi, cp, 1); @@ -214,7 +215,6 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); retries = 0; do { - int sr; sr = spi_w8r8(at25->spi, AT25_RDSR); if (sr < 0 || (sr & AT25_SR_nRDY)) { @@ -228,7 +228,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, break; } while (retries++ < 3 || time_before_eq(jiffies, timeout)); - if (time_after(jiffies, timeout)) { + if ((sr < 0) || (sr & AT25_SR_nRDY)) { dev_err(&at25->spi->dev, "write %d bytes offset %d, " "timeout after %u msecs\n", diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index fa2d93a9fb8..aed609832bc 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ 
-29,7 +29,6 @@ #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index eedbf9c3276..79689b10f93 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -24,7 +24,6 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 8d1c60a3f0d..5d778ec8cdb 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -235,7 +235,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) skb->ip_summed = CHECKSUM_UNNECESSARY; dev_dbg(xpnet, "passing skb to network layer\n" - KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " "skb->end=0x%p skb->len=%d\n", (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len); @@ -399,7 +399,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, msg->buf_pa = xp_pa((void *)start_addr); dev_dbg(xpnet, "sending XPC message to %d:%d\n" - KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " + "msg->buf_pa=0x%lx, msg->size=%u, " "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, msg->leadin_ignore, msg->tailout_ignore); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 11efefb1af5..4e72964a7b4 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -278,7 +278,7 @@ static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data) if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8))) return -EINVAL; - sg_miter_start(&miter, data->sg, data->sg_len, 0); + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG); cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 15, CB710_MMC_C2_READ_PIO_SIZE_MASK); @@ -307,7 +307,7 @@ static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data) goto out; } out: - cb710_sg_miter_stop_writing(&miter); + sg_miter_stop(&miter); return err; } @@ -322,7 +322,7 @@ static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data) if (unlikely(data->blocks > 1 && data->blksz & 15)) return -EINVAL; - sg_miter_start(&miter, data->sg, data->sg_len, 0); + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG); cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0, CB710_MMC_C2_READ_PIO_SIZE_MASK); diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index e0be21a4a69..bf98d7cc928 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -652,7 +652,7 @@ static irqreturn_t imxmci_irq(int irq, void *devid) set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); tasklet_schedule(&host->tasklet); - return IRQ_RETVAL(handled);; + return IRQ_RETVAL(handled); } static void imxmci_tasklet_fnc(unsigned long data) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 240608cc7ae..a461017ce5c 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi) struct mmc_spi_host *host; int status; + /* We rely on full duplex transfers, mostly to reduce + * per-transfer 
overheads (by making fewer transfers). + */ + if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) + return -EINVAL; + /* MMC and SD specs only seem to care that sampling is on the * rising edge ... meaning SPI modes 0 or 3. So either SPI mode * should be legit. We'll use mode 0 since the steady state is 0, diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index b56d72ff06e..34e23489811 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -384,7 +384,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev) u16 val[2] = {0, 0}; val[0] = mvsd_read(MVSD_FIFO); val[1] = mvsd_read(MVSD_FIFO); - memcpy(p, &val, s); + memcpy(p, ((void *)&val) + 4 - s, s); s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } @@ -423,7 +423,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev) if (s < 4) { if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { u16 val[2] = {0, 0}; - memcpy(&val, p, s); + memcpy(((void *)&val) + 4 - s, p, s); mvsd_write(MVSD_FIFO, val[0]); mvsd_write(MVSD_FIFO, val[1]); s = 0; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index d7d7109ef47..e55ac792d68 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -168,12 +168,12 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) if (data->flags & MMC_DATA_READ) { host->dma_dir = DMA_FROM_DEVICE; - dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG; + dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; DRCMR(host->dma_drcmrtx) = 0; DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD; } else { host->dma_dir = DMA_TO_DEVICE; - dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC; + dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; DRCMR(host->dma_drcmrrx) = 0; DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD; } diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c index d79fa55c3b8..1e8aa590bb3 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of.c @@ -158,6 +158,13 @@ static unsigned int esdhc_get_max_clock(struct sdhci_host *host) return of_host->clock; } +static unsigned int esdhc_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_of_host *of_host = sdhci_priv(host); + + return of_host->clock / 256 / 16; +} + static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) { struct sdhci_of_host *of_host = sdhci_priv(host); @@ -184,6 +191,7 @@ static struct sdhci_of_data sdhci_esdhc = { .set_clock = esdhc_set_clock, .enable_dma = esdhc_enable_dma, .get_max_clock = esdhc_get_max_clock, + .get_min_clock = esdhc_get_min_clock, .get_timeout_clock = esdhc_get_timeout_clock, }, }; @@ -226,7 +234,7 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev, return -ENODEV; host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); - if (!host) + if (IS_ERR(host)) return -ENOMEM; of_host = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6779b4ecab1..fc96f8cb9c0 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -773,8 +773,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) } if (!(host->flags & SDHCI_REQ_USE_DMA)) { - sg_miter_start(&host->sg_miter, - data->sg, data->sg_len, SG_MITER_ATOMIC); + int flags; + + flags = SG_MITER_ATOMIC; + if (host->data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); host->blocks = data->blocks; } @@ -1766,7 +1772,10 @@ int sdhci_add_host(struct sdhci_host *host) * Set host parameters. 
*/ mmc->ops = &sdhci_ops; - mmc->f_min = host->max_clk / 256; + if (host->ops->get_min_clock) + mmc->f_min = host->ops->get_min_clock(host); + else + mmc->f_min = host->max_clk / 256; mmc->f_max = host->max_clk; mmc->caps = MMC_CAP_SDIO_IRQ; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 831ddf7dcb4..c77e9ff3022 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -302,6 +302,7 @@ struct sdhci_ops { int (*enable_dma)(struct sdhci_host *host); unsigned int (*get_max_clock)(struct sdhci_host *host); + unsigned int (*get_min_clock)(struct sdhci_host *host); unsigned int (*get_timeout_clock)(struct sdhci_host *host); }; diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 5011fa73f91..1479da6d3aa 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s, parts[this_part].name = extra_mem; extra_mem += name_len + 1; - dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n", + dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", this_part, parts[this_part].name, parts[this_part].offset, diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 59c46126a5c..10ed195c0c1 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -54,7 +54,7 @@ #define SR_SRWD 0x80 /* SR write protect */ /* Define max times to check status register before we give up. */ -#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */ +#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ #define CMD_SIZE 4 #ifdef CONFIG_M25PXX_USE_FAST_READ @@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi) flash->partitioned = 1; return add_mtd_partitions(&flash->mtd, parts, nr_parts); } - } else if (data->nr_parts) + } else if (data && data->nr_parts) dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", data->nr_parts, data->name); diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 73f05227dc8..d8cf29c01cc 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate) if (!desperate && inftl->numfreeEUNs < 2) { DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " "EUNs (%d)\n", inftl->numfreeEUNs); - return 0xffff; + return BLOCK_NIL; } /* Scan for a free block */ @@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned silly = MAX_LOOPS; while (thisEUN < inftl->nb_blocks) { for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { - if ((BlockMap[block] != 0xffff) || BlockDeleted[block]) + if ((BlockMap[block] != BLOCK_NIL) || + BlockDeleted[block]) continue; if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) @@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in " "Virtual Unit Chain 0x%x\n", thisVUC); - return 0xffff; + return BLOCK_NIL; } /* Skip to next block in chain */ @@ -549,7 +550,7 @@ hitused: * waiting to be picked up. We're going to have to fold * a chain to make room. */ - thisEUN = INFTL_makefreeblock(inftl, 0xffff); + thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL); /* * Hopefully we free something, lets try again. 
@@ -631,7 +632,7 @@ hitused: printk(KERN_WARNING "INFTL: error folding to make room for Virtual " "Unit Chain 0x%x\n", thisVUC); - return 0xffff; + return BLOCK_NIL; } /* diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 0b98654d8ee..7a58bd5522f 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -284,13 +284,6 @@ config MTD_L440GX BE VERY CAREFUL. -config MTD_SBC8240 - tristate "Flash device on SBC8240" - depends on MTD_JEDECPROBE && 8260 - help - Flash access on the SBC8240 board from Wind River. See - <http://www.windriver.com/products/sbc8240/> - config MTD_TQM8XXL tristate "CFI Flash device mapped on TQM8XXL" depends on MTD_CFI && TQM8xxL diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 8bae7f9850c..5beb0662d72 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -50,7 +50,6 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o obj-$(CONFIG_MTD_H720X) += h720x-flash.o -obj-$(CONFIG_MTD_SBC8240) += sbc8240.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o obj-$(CONFIG_MTD_IXP2000) += ixp2000.o obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index b08a798ee25..2aac41bde8b 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c @@ -42,10 +42,8 @@ #include <mach/hardware.h> #include <asm/system.h> -#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2) - struct armflash_subdev_info { - char name[SUBDEV_NAME_SIZE]; + char *name; struct mtd_info *mtd; struct map_info map; struct flash_platform_data *plat; @@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev) map_destroy(subdev->mtd); if (subdev->map.virt) iounmap(subdev->map.virt); + kfree(subdev->name); + subdev->name = NULL; release_mem_region(subdev->map.phys, subdev->map.size); } @@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev) if (nr == 1) /* No MTD concatenation, just use the default name */ - snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s", - dev_name(&dev->dev)); + subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL); else - snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d", - dev_name(&dev->dev), i); + subdev->name = kasprintf(GFP_KERNEL, "%s-%d", + dev_name(&dev->dev), i); + if (!subdev->name) { + err = -ENOMEM; + break; + } subdev->plat = plat; err = armflash_subdev_probe(subdev, res); - if (err) + if (err) { + kfree(subdev->name); + subdev->name = NULL; break; + } } info->nr_subdev = i; diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c deleted file mode 100644 index d5374cdcb16..00000000000 --- a/drivers/mtd/maps/sbc8240.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Handle mapping of the flash memory access routines on the SBC8240 board. - * - * Carolyn Smith, Tektronix, Inc. - * - * This code is GPLed - */ - -/* - * The SBC8240 has 2 flash banks. - * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors. - * It contains the U-Boot code (7 sectors) and the environment (1 sector). - * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector, - * 2 x 8 KiB sectors, 1 x 16 KiB sectors. - * Both parts are JEDEC compatible. 
- */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <asm/io.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/map.h> -#include <linux/mtd/cfi.h> - -#ifdef CONFIG_MTD_PARTITIONS -#include <linux/mtd/partitions.h> -#endif - -#define DEBUG - -#ifdef DEBUG -# define debugk(fmt,args...) printk(fmt ,##args) -#else -# define debugk(fmt,args...) -#endif - - -#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */ -#define WINDOW_SIZE0 0x00080000 -#define BUSWIDTH0 1 - -#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */ -#define WINDOW_SIZE1 0x00400000 -#define BUSWIDTH1 8 - -#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */ -#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */ - - -static struct map_info sbc8240_map[2] = { - { - .name = "sbc8240 Flash Bank #0", - .size = WINDOW_SIZE0, - .bankwidth = BUSWIDTH0, - }, - { - .name = "sbc8240 Flash Bank #1", - .size = WINDOW_SIZE1, - .bankwidth = BUSWIDTH1, - } -}; - -#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map) - -/* - * The following defines the partition layout of SBC8240 boards. - * - * See include/linux/mtd/partitions.h for definition of the - * mtd_partition structure. - * - * The *_max_flash_size is the maximum possible mapped flash size - * which is not necessarily the actual flash size. It must correspond - * to the value specified in the mapping definition defined by the - * "struct map_desc *_io_desc" for the corresponding machine. - */ - -#ifdef CONFIG_MTD_PARTITIONS - -static struct mtd_partition sbc8240_uboot_partitions [] = { - /* Bank 0 */ - { - .name = "U-boot", /* U-Boot Firmware */ - .offset = 0, - .size = 0x00070000, /* 7 x 64 KiB sectors */ - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "environment", /* U-Boot environment */ - .offset = 0x00070000, - .size = 0x00010000, /* 1 x 64 KiB sector */ - }, -}; - -static struct mtd_partition sbc8240_fs_partitions [] = { - { - .name = "jffs", /* JFFS filesystem */ - .offset = 0, - .size = 0x003C0000, /* 4 * 15 * 64KiB */ - }, - { - .name = "tmp32", - .offset = 0x003C0000, - .size = 0x00020000, /* 4 * 32KiB */ - }, - { - .name = "tmp8a", - .offset = 0x003E0000, - .size = 0x00008000, /* 4 * 8KiB */ - }, - { - .name = "tmp8b", - .offset = 0x003E8000, - .size = 0x00008000, /* 4 * 8KiB */ - }, - { - .name = "tmp16", - .offset = 0x003F0000, - .size = 0x00010000, /* 4 * 16KiB */ - } -}; - -/* trivial struct to describe partition information */ -struct mtd_part_def -{ - int nums; - unsigned char *type; - struct mtd_partition* mtd_part; -}; - -static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS]; -static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS]; - - -#endif /* CONFIG_MTD_PARTITIONS */ - - -static int __init init_sbc8240_mtd (void) -{ - static struct _cjs { - u_long addr; - u_long size; - } pt[NUM_FLASH_BANKS] = { - { - .addr = WINDOW_ADDR0, - .size = WINDOW_SIZE0 - }, - { - .addr = WINDOW_ADDR1, - .size = WINDOW_SIZE1 - }, - }; - - int devicesfound = 0; - int i,j; - - for (i = 0; i < NUM_FLASH_BANKS; i++) { - printk (KERN_NOTICE MSG_PREFIX - "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr); - - sbc8240_map[i].map_priv_1 = - (unsigned long) ioremap (pt[i].addr, pt[i].size); - if (!sbc8240_map[i].map_priv_1) { - printk (MSG_PREFIX "failed to ioremap\n"); - for (j = 0; j < i; j++) { - iounmap((void *) sbc8240_map[j].map_priv_1); - sbc8240_map[j].map_priv_1 = 0; - } - return -EIO; - } - simple_map_init(&sbc8240_mtd[i]); - - sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]); - - if 
(sbc8240_mtd[i]) { - sbc8240_mtd[i]->module = THIS_MODULE; - devicesfound++; - } else { - if (sbc8240_map[i].map_priv_1) { - iounmap((void *) sbc8240_map[i].map_priv_1); - sbc8240_map[i].map_priv_1 = 0; - } - } - } - - if (!devicesfound) { - printk(KERN_NOTICE MSG_PREFIX - "No suppported flash chips found!\n"); - return -ENXIO; - } - -#ifdef CONFIG_MTD_PARTITIONS - sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions; - sbc8240_part_banks[0].type = "static image"; - sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions); - sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions; - sbc8240_part_banks[1].type = "static file system"; - sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions); - - for (i = 0; i < NUM_FLASH_BANKS; i++) { - - if (!sbc8240_mtd[i]) continue; - if (sbc8240_part_banks[i].nums == 0) { - printk (KERN_NOTICE MSG_PREFIX - "No partition info available, registering whole device\n"); - add_mtd_device(sbc8240_mtd[i]); - } else { - printk (KERN_NOTICE MSG_PREFIX - "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name); - add_mtd_partitions (sbc8240_mtd[i], - sbc8240_part_banks[i].mtd_part, - sbc8240_part_banks[i].nums); - } - } -#else - printk(KERN_NOTICE MSG_PREFIX - "Registering %d flash banks at once\n", devicesfound); - - for (i = 0; i < devicesfound; i++) { - add_mtd_device(sbc8240_mtd[i]); - } -#endif /* CONFIG_MTD_PARTITIONS */ - - return devicesfound == 0 ? -ENXIO : 0; -} - -static void __exit cleanup_sbc8240_mtd (void) -{ - int i; - - for (i = 0; i < NUM_FLASH_BANKS; i++) { - if (sbc8240_mtd[i]) { - del_mtd_device (sbc8240_mtd[i]); - map_destroy (sbc8240_mtd[i]); - } - if (sbc8240_map[i].map_priv_1) { - iounmap ((void *) sbc8240_map[i].map_priv_1); - sbc8240_map[i].map_priv_1 = 0; - } - } -} - -module_init (init_sbc8240_mtd); -module_exit (cleanup_sbc8240_mtd); - -MODULE_LICENSE ("GPL"); -MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>"); -MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards"); - diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index c3f62654b6d..7baba40c1ed 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -144,7 +144,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) struct mtd_blktrans_ops *tr = dev->tr; int ret = -ENODEV; - if (!try_module_get(dev->mtd->owner)) + if (!get_mtd_device(NULL, dev->mtd->index)) goto out; if (!try_module_get(tr->owner)) @@ -158,7 +158,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) ret = 0; if (tr->open && (ret = tr->open(dev))) { dev->mtd->usecount--; - module_put(dev->mtd->owner); + put_mtd_device(dev->mtd); out_tr: module_put(tr->owner); } @@ -177,7 +177,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode) if (!ret) { dev->mtd->usecount--; - module_put(dev->mtd->owner); + put_mtd_device(dev->mtd); module_put(tr->owner); } diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 208c6faa035..77db5ce24d9 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -29,6 +29,8 @@ static struct mtdblk_dev { enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; } *mtdblks[MAX_MTD_DEVICES]; +static struct mutex mtdblks_lock; + /* * Cache stuff... * @@ -270,15 +272,19 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); + mutex_lock(&mtdblks_lock); if (mtdblks[dev]) { mtdblks[dev]->count++; + mutex_unlock(&mtdblks_lock); return 0; } /* OK, it's not open. 
Create cache info for it */ mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); - if (!mtdblk) + if (!mtdblk) { + mutex_unlock(&mtdblks_lock); return -ENOMEM; + } mtdblk->count = 1; mtdblk->mtd = mtd; @@ -291,6 +297,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) } mtdblks[dev] = mtdblk; + mutex_unlock(&mtdblks_lock); DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); @@ -304,6 +311,8 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); + mutex_lock(&mtdblks_lock); + mutex_lock(&mtdblk->cache_mutex); write_cached_data(mtdblk); mutex_unlock(&mtdblk->cache_mutex); @@ -316,6 +325,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) vfree(mtdblk->cache_data); kfree(mtdblk); } + + mutex_unlock(&mtdblks_lock); + DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); return 0; @@ -376,6 +388,8 @@ static struct mtd_blktrans_ops mtdblock_tr = { static int __init init_mtdblock(void) { + mutex_init(&mtdblks_lock); + return register_mtd_blktrans(&mtdblock_tr); } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index fac54a3fa3f..00ebf7af746 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -65,8 +65,8 @@ static void mtd_release(struct device *dev) static int mtd_cls_suspend(struct device *dev, pm_message_t state) { struct mtd_info *mtd = dev_to_mtd(dev); - - if (mtd->suspend) + + if (mtd && mtd->suspend) return mtd->suspend(mtd); else return 0; @@ -76,7 +76,7 @@ static int mtd_cls_resume(struct device *dev) { struct mtd_info *mtd = dev_to_mtd(dev); - if (mtd->resume) + if (mtd && mtd->resume) mtd->resume(mtd); return 0; } @@ -298,6 +298,7 @@ int add_mtd_device(struct mtd_info *mtd) mtd->dev.class = &mtd_class; mtd->dev.devt = MTD_DEVT(i); dev_set_name(&mtd->dev, "mtd%d", i); + dev_set_drvdata(&mtd->dev, mtd); if (device_register(&mtd->dev) != 0) { mtd_table[i] = NULL; break; diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 2802992b39d..20c828ba940 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) &num_partitions); if ((!partitions) || (num_partitions == 0)) { - printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n"); + printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); res = ENXIO; goto err_no_partitions; } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 0cd76f89f4b..ebd07e95b81 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -11,6 +11,8 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/sched.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> @@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); unsigned long timeo = jiffies; - int status, state = this->state; + int status = NAND_STATUS_FAIL, state = this->state; if (state == FL_ERASING) timeo += (HZ * 400) / 1000; @@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) while (time_before(jiffies, timeo)) { status = __raw_readb(this->IO_ADDR_R); - if (!(status & 0x40)) + if (status & NAND_STATUS_READY) break; + cond_resched(); } return status; } diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 7ad972229db..0d9d4bc9c76 100644 --- 
a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) buf64 = (uint64_t *)buf; while (i < len/8) { uint64_t x; - asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base)); + asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); buf64[i++] = x; } i *= 8; diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index e3f8495a94c..1002e188299 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c @@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev) int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { + loff_t mask = mtd->writesize - 1; struct mtd_oob_ops ops; int res; ops.mode = MTD_OOB_PLACE; - ops.ooboffs = offs & (mtd->writesize - 1); + ops.ooboffs = offs & mask; ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; - res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); + res = mtd->read_oob(mtd, offs & ~mask, &ops); *retlen = ops.oobretlen; return res; } @@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { + loff_t mask = mtd->writesize - 1; struct mtd_oob_ops ops; int res; ops.mode = MTD_OOB_PLACE; - ops.ooboffs = offs & (mtd->writesize - 1); + ops.ooboffs = offs & mask; ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; - res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); + res = mtd->write_oob(mtd, offs & ~mask, &ops); *retlen = ops.oobretlen; return res; } @@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf, uint8_t *oob) { + loff_t mask = mtd->writesize - 1; struct mtd_oob_ops ops; int res; ops.mode = MTD_OOB_PLACE; - ops.ooboffs = offs; + ops.ooboffs = offs & mask; ops.ooblen = mtd->oobsize; ops.oobbuf = oob; ops.datbuf = buf; ops.len = len; - res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); + res = mtd->write_oob(mtd, offs & ~mask, &ops); *retlen = ops.retlen; return res; } @@ -208,7 +211,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate ) /* Normally, we force a fold to happen before we run out of free blocks completely */ if (!desperate && nftl->numfreeEUNs < 2) { DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); - return 0xffff; + return BLOCK_NIL; } /* Scan for a free block */ @@ -230,11 +233,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate ) printk("Argh! No free blocks found! 
LastFreeEUN = %d, " "FirstEUN = %d\n", nftl->LastFreeEUN, le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN)); - return 0xffff; + return BLOCK_NIL; } } while (pot != nftl->LastFreeEUN); - return 0xffff; + return BLOCK_NIL; } static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) @@ -431,7 +434,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p /* add the header so that it is now a valid chain */ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); - oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff; + oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL; nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, 8, &retlen, (char *)&oob.u); @@ -515,7 +518,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock) if (ChainLength < 2) { printk(KERN_WARNING "No Virtual Unit Chains available for folding. " "Failing request\n"); - return 0xffff; + return BLOCK_NIL; } return NFTL_foldchain (nftl, LongestChain, pendingblock); @@ -578,7 +581,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n", thisVUC); - return 0xffff; + return BLOCK_NIL; } /* Skip to next block in chain */ @@ -601,7 +604,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) //u16 startEUN = nftl->EUNtable[thisVUC]; //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); - writeEUN = NFTL_makefreeblock(nftl, 0xffff); + writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL); if (writeEUN == BLOCK_NIL) { /* OK, we accept that the above comment is @@ -673,7 +676,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", thisVUC); - return 0xffff; + return BLOCK_NIL; } static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 38d656b9b2e..0108ed42e87 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -266,7 +266,7 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area) if (ONENAND_CURRENT_BUFFERRAM(this)) { if (area == ONENAND_DATARAM) - return mtd->writesize; + return this->writesize; if (area == ONENAND_SPARERAM) return mtd->oobsize; } @@ -770,6 +770,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev) } iounmap(c->onenand.base); release_mem_region(c->phys_base, ONENAND_IO_SIZE); + gpmc_cs_free(c->gpmc_cs); kfree(c); return 0; diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 286ed594e5a..e1f7d0a78b9 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -657,6 +657,11 @@ static int io_init(struct ubi_device *ubi) if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) ubi->bad_allowed = 1; + if (ubi->mtd->type == MTD_NORFLASH) { + ubi_assert(ubi->mtd->writesize == 1); + ubi->nor_flash = 1; + } + ubi->min_io_size = ubi->mtd->writesize; ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; @@ -996,6 +1001,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) ubi_msg("number of PEBs reserved for bad PEB handling: %d", ubi->beb_rsvd_pebs); ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); + ubi_msg("image sequence number: %d", ubi->image_seq); /* * The below lock makes sure we do not race with 'ubi_thread()' 
which diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index c0ed60e8ade..54b0186915f 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -44,6 +44,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) be32_to_cpu(ec_hdr->vid_hdr_offset)); printk(KERN_DEBUG "\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset)); + printk(KERN_DEBUG "\timage_seq %d\n", + be32_to_cpu(ec_hdr->image_seq)); printk(KERN_DEBUG "\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc)); printk(KERN_DEBUG "erase counter header hexdump:\n"); diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 13777e5beac..a4da7a09b94 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -93,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); #define UBI_IO_DEBUG 0 #endif +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len); +#else +#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0 +#endif + #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT #define DBG_DISABLE_BGT 1 #else @@ -167,6 +173,7 @@ static inline int ubi_dbg_is_erase_failure(void) #define ubi_dbg_is_bitflip() 0 #define ubi_dbg_is_write_failure() 0 #define ubi_dbg_is_erase_failure() 0 +#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0 #endif /* !CONFIG_MTD_UBI_DEBUG */ #endif /* !__UBI_DEBUG_H__ */ diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 0f2034c3ed2..e4d9ef0c965 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1254,6 +1254,7 @@ out_free: if (!ubi->volumes[i]) continue; kfree(ubi->volumes[i]->eba_tbl); + ubi->volumes[i]->eba_tbl = NULL; } return err; } diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 95aaac03f93..b5e478fa266 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c @@ -332,6 +332,7 @@ static int gluebi_create(struct ubi_device_info *di, } gluebi->vol_id = vi->vol_id; + gluebi->ubi_num = vi->ubi_num; mtd->type = MTD_UBIVOLUME; if (!di->ro_mode) mtd->flags = MTD_WRITEABLE; diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index effaff28bab..4cb69925d8d 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -98,17 +98,12 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr); -static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, - int len); -static int paranoid_check_empty(struct ubi_device *ubi, int pnum); #else #define paranoid_check_not_bad(ubi, pnum) 0 #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 #define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0 #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 -#define paranoid_check_all_ff(ubi, pnum, offset, len) 0 -#define paranoid_check_empty(ubi, pnum) 0 #endif /** @@ -244,7 +239,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, return err > 0 ? -EINVAL : err; /* The area we are writing to has to contain all 0xFF bytes */ - err = paranoid_check_all_ff(ubi, pnum, offset, len); + err = ubi_dbg_check_all_ff(ubi, pnum, offset, len); if (err) return err > 0 ? 
-EINVAL : err; @@ -271,8 +266,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, addr = (loff_t)pnum * ubi->peb_size + offset; err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf); if (err) { - ubi_err("error %d while writing %d bytes to PEB %d:%d, written" - " %zd bytes", err, len, pnum, offset, written); + ubi_err("error %d while writing %d bytes to PEB %d:%d, written " + "%zd bytes", err, len, pnum, offset, written); ubi_dbg_dump_stack(); } else ubi_assert(written == len); @@ -350,7 +345,7 @@ retry: return -EIO; } - err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size); + err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size); if (err) return err > 0 ? -EINVAL : err; @@ -459,6 +454,54 @@ out: } /** + * nor_erase_prepare - prepare a NOR flash PEB for erasure. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to prepare + * + * NOR flash, or at least some of them, have peculiar embedded PEB erasure + * algorithm: the PEB is first filled with zeroes, then it is erased. And + * filling with zeroes starts from the end of the PEB. This was observed with + * Spansion S29GL512N NOR flash. + * + * This means that in case of a power cut we may end up with intact data at the + * beginning of the PEB, and all zeroes at the end of PEB. In other words, the + * EC and VID headers are OK, but a large chunk of data at the end of PEB is + * zeroed. This makes UBI mistakenly treat this PEB as used and associate it + * with an LEB, which leads to subsequent failures (e.g., UBIFS fails). + * + * This function is called before erasing NOR PEBs and it zeroes out EC and VID + * magic numbers in order to invalidate them and prevent the failures. Returns + * zero in case of success and a negative error code in case of failure. + */ +static int nor_erase_prepare(struct ubi_device *ubi, int pnum) +{ + int err; + size_t written; + loff_t addr; + uint32_t data = 0; + + addr = (loff_t)pnum * ubi->peb_size; + err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); + if (err) { + ubi_err("error %d while writing 4 bytes to PEB %d:%d, written " + "%zd bytes", err, pnum, 0, written); + ubi_dbg_dump_stack(); + return err; + } + + addr += ubi->vid_hdr_aloffset; + err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); + if (err) { + ubi_err("error %d while writing 4 bytes to PEB %d:%d, written " + "%zd bytes", err, pnum, ubi->vid_hdr_aloffset, written); + ubi_dbg_dump_stack(); + return err; + } + + return 0; +} + +/** * ubi_io_sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object * @pnum: physical eraseblock number to erase @@ -489,6 +532,12 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) return -EROFS; } + if (ubi->nor_flash) { + err = nor_erase_prepare(ubi, pnum); + if (err) + return err; + } + if (torture) { ret = torture_peb(ubi, pnum); if (ret < 0) @@ -672,11 +721,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (read_err != -EBADMSG && check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { /* The physical eraseblock is supposedly empty */ - err = paranoid_check_all_ff(ubi, pnum, 0, - ubi->peb_size); - if (err) - return err > 0 ? 
UBI_IO_BAD_EC_HDR : err; - if (verbose) ubi_warn("no EC header found at PEB %d, " "only 0xFF bytes", pnum); @@ -752,6 +796,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, ec_hdr->version = UBI_VERSION; ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); + ec_hdr->image_seq = cpu_to_be32(ubi->image_seq); crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); ec_hdr->hdr_crc = cpu_to_be32(crc); @@ -947,15 +992,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (read_err != -EBADMSG && check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { /* The physical eraseblock is supposedly free */ - - /* - * The below is just a paranoid check, it has to be - * compiled out if paranoid checks are disabled. - */ - err = paranoid_check_empty(ubi, pnum); - if (err) - return err > 0 ? UBI_IO_BAD_VID_HDR : err; - if (verbose) ubi_warn("no VID header found at PEB %d, " "only 0xFF bytes", pnum); @@ -1229,7 +1265,7 @@ exit: } /** - * paranoid_check_all_ff - check that a region of flash is empty. + * ubi_dbg_check_all_ff - check that a region of flash is empty. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * @offset: the starting offset within the physical eraseblock to check @@ -1239,8 +1275,7 @@ exit: * @offset of the physical eraseblock @pnum, %1 if not, and a negative error * code if an error occurred. */ -static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, - int len) +int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) { size_t read; int err; @@ -1276,74 +1311,4 @@ error: return err; } -/** - * paranoid_check_empty - whether a PEB is empty. - * @ubi: UBI device description object - * @pnum: the physical eraseblock number to check - * - * This function makes sure PEB @pnum is empty, which means it contains only - * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a - * negative error code in case of failure. - * - * Empty PEBs have the EC header, and do not have the VID header. The caller of - * this function should have already made sure the PEB does not have the VID - * header. However, this function re-checks that, because it is possible that - * the header and data has already been written to the PEB. - * - * Let's consider a possible scenario. Suppose there are 2 tasks - A and B. - * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to - * find which LEB it corresponds to. PEB X is currently unmapped, and has no - * VID header. Task B is trying to write to PEB X. - * - * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The - * read data contain all 0xFF bytes; - * Task B: writes VID header and some data to PEB X; - * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. And if we - * do not re-read the VID header, and do not cancel the checking if it - * is there, we fail. 
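/*
 * Editorial aside, not part of the patch: the checks renamed and exported
 * above (ubi_dbg_check_all_ff and friends) all reduce to verifying that a
 * flash region still holds the erased 0xFF pattern. A hypothetical,
 * stripped-down illustration of that test; the in-tree helpers operate on
 * ubi->dbg_peb_buf and add locking, hex dumps and error reporting:
 */
static int region_is_all_ff(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return 0;	/* found a programmed byte */

	return 1;			/* region looks erased */
}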
- */ -static int paranoid_check_empty(struct ubi_device *ubi, int pnum) -{ - int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize; - size_t read; - uint32_t magic; - const struct ubi_vid_hdr *vid_hdr; - - mutex_lock(&ubi->dbg_buf_mutex); - err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf); - if (err && err != -EUCLEAN) { - ubi_err("error %d while reading %d bytes from PEB %d:%d, " - "read %zd bytes", err, len, pnum, offs, read); - goto error; - } - - vid_hdr = ubi->dbg_peb_buf; - magic = be32_to_cpu(vid_hdr->magic); - if (magic == UBI_VID_HDR_MAGIC) - /* The PEB contains VID header, so it is not empty */ - goto out; - - err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); - if (err == 0) { - ubi_err("flash region at PEB %d:%d, length %d does not " - "contain all 0xFF bytes", pnum, offs, len); - goto fail; - } - -out: - mutex_unlock(&ubi->dbg_buf_mutex); - return 0; - -fail: - ubi_err("paranoid check failed for PEB %d", pnum); - ubi_msg("hex dump of the %d-%d region", offs, offs + len); - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, - ubi->dbg_peb_buf, len, 1); - err = 1; -error: - ubi_dbg_dump_stack(); - mutex_unlock(&ubi->dbg_buf_mutex); - return err; -} - #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index c3d653ba5ca..b847745394b 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -757,6 +757,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, si->is_empty = 0; if (!ec_corr) { + int image_seq; + /* Make sure UBI version is OK */ if (ech->version != UBI_VERSION) { ubi_err("this UBI version is %d, image version is %d", @@ -778,6 +780,29 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, ubi_dbg_dump_ec_hdr(ech); return -EINVAL; } + + /* + * Make sure that all PEBs have the same image sequence number. + * This allows us to detect situations when users flash UBI + * images incorrectly, so that the flash has the new UBI image + * and leftovers from the old one. This feature was added + * relatively recently, and the sequence number was always + * zero, because old UBI implementations always set it to zero. + * For this reasons, we do not panic if some PEBs have zero + * sequence number, while other PEBs have non-zero sequence + * number. 
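/*
 * Editorial illustration, consistent with the ubi-media.h comment added
 * later in this patch: if a freshly built image is stamped with sequence
 * number 7 but the flasher is interrupted and leaves behind PEBs from the
 * previous image stamped 6, the attach-time scan below sees both values,
 * reports the mismatch and refuses to attach, instead of silently mixing
 * old and new eraseblocks (the legacy all-zero case is tolerated).
 */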
+ */ + image_seq = be32_to_cpu(ech->image_seq); + if (!si->image_seq_set) { + ubi->image_seq = image_seq; + si->image_seq_set = 1; + } else if (ubi->image_seq && ubi->image_seq != image_seq) { + ubi_err("bad image sequence number %d in PEB %d, " + "expected %d", image_seq, pnum, ubi->image_seq); + ubi_dbg_dump_ec_hdr(ech); + return -EINVAL; + } + } /* OK, we've done with the EC header, let's look at the VID header */ diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h index 61df208e2f2..1017cf12def 100644 --- a/drivers/mtd/ubi/scan.h +++ b/drivers/mtd/ubi/scan.h @@ -102,6 +102,7 @@ struct ubi_scan_volume { * @mean_ec: mean erase counter value * @ec_sum: a temporary variable used when calculating @mean_ec * @ec_count: a temporary variable used when calculating @mean_ec + * @image_seq_set: indicates @ubi->image_seq is known * * This data structure contains the result of scanning and may be used by other * UBI sub-systems to build final UBI data structures, further error-recovery @@ -124,6 +125,7 @@ struct ubi_scan_info { int mean_ec; uint64_t ec_sum; int ec_count; + int image_seq_set; }; struct ubi_device; diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 8419fdccc79..503ea9b2730 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -129,6 +129,7 @@ enum { * @ec: the erase counter * @vid_hdr_offset: where the VID header starts * @data_offset: where the user data start + * @image_seq: image sequence number * @padding2: reserved for future, zeroes * @hdr_crc: erase counter header CRC checksum * @@ -144,6 +145,14 @@ enum { * volume identifier header and user data, relative to the beginning of the * physical eraseblock. These values have to be the same for all physical * eraseblocks. + * + * The @image_seq field is used to validate a UBI image that has been prepared + * for a UBI device. The @image_seq value can be any value, but it must be the + * same on all eraseblocks. UBI will ensure that all new erase counter headers + * also contain this value, and will check the value when scanning at start-up. + * One way to make use of @image_seq is to increase its value by one every time + * an image is flashed over an existing image, then, if the flashing does not + * complete, UBI will detect the error when scanning. */ struct ubi_ec_hdr { __be32 magic; @@ -152,7 +161,8 @@ struct ubi_ec_hdr { __be64 ec; /* Warning: the current limit is 31-bit anyway! */ __be32 vid_hdr_offset; __be32 data_offset; - __u8 padding2[36]; + __be32 image_seq; + __u8 padding2[32]; __be32 hdr_crc; } __attribute__ ((packed)); diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 28acd133c99..6a5fe963378 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -301,6 +301,7 @@ struct ubi_wl_entry; * @vol->readers, @vol->writers, @vol->exclusive, * @vol->ref_count, @vol->mapping and @vol->eba_tbl. 
* @ref_count: count of references on the UBI device + * @image_seq: image sequence number recorded on EC headers * * @rsvd_pebs: count of reserved physical eraseblocks * @avail_pebs: count of available physical eraseblocks @@ -372,6 +373,7 @@ struct ubi_wl_entry; * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or * not + * @nor_flash: non-zero if working on top of NOR flash * @mtd: MTD device descriptor * * @peb_buf1: a buffer of PEB size used for different purposes @@ -390,6 +392,7 @@ struct ubi_device { struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; spinlock_t volumes_lock; int ref_count; + int image_seq; int rsvd_pebs; int avail_pebs; @@ -452,7 +455,8 @@ struct ubi_device { int vid_hdr_offset; int vid_hdr_aloffset; int vid_hdr_shift; - int bad_allowed; + unsigned int bad_allowed:1; + unsigned int nor_flash:1; struct mtd_info *mtd; void *peb_buf1; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 2b247230061..600c7229d5c 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -459,6 +459,14 @@ retry: dbg_wl("PEB %d EC %d", e->pnum, e->ec); prot_queue_add(ubi, e); spin_unlock(&ubi->wl_lock); + + err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, + ubi->peb_size - ubi->vid_hdr_aloffset); + if (err) { + ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum); + return err > 0 ? -EINVAL : err; + } + return e->pnum; } diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 3e00fa8ea65..4a7c32895be 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev) skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); } - vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ + if (i != 0) + vp->rx_ring[i - 1].next = + isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); } if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. 
*/ diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c34aee91250..45675889850 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -235,6 +235,7 @@ enum vortex_chips { CH_3C900B_FL, CH_3C905_1, CH_3C905_2, + CH_3C905B_TX, CH_3C905B_1, CH_3C905B_2, @@ -307,6 +308,8 @@ static struct vortex_chip_info { PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3c905 Boomerang 100baseT4", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, + {"3C905B-TX Fast Etherlink XL PCI", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 100baseTx", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, @@ -389,6 +392,7 @@ static struct pci_device_id vortex_pci_tbl[] = { { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, + { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, @@ -2721,13 +2725,15 @@ dump_tx_ring(struct net_device *dev) &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); issue_and_wait(dev, DownStall); for (i = 0; i < TX_RING_SIZE; i++) { - pr_err(" %d: @%p length %8.8x status %8.8x\n", i, - &vp->tx_ring[i], + unsigned int length; + #if DO_ZEROCOPY - le32_to_cpu(vp->tx_ring[i].frag[0].length), + length = le32_to_cpu(vp->tx_ring[i].frag[0].length); #else - le32_to_cpu(vp->tx_ring[i].length), + length = le32_to_cpu(vp->tx_ring[i].length); #endif + pr_err(" %d: @%p length %8.8x status %8.8x\n", + i, &vp->tx_ring[i], length, le32_to_cpu(vp->tx_ring[i].status)); } if (!stalled) diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 50efde11ea6..d0dbbf39349 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -515,7 +515,7 @@ rx_status_loop: dma_addr_t mapping; struct sk_buff *skb, *new_skb; struct cp_desc *desc; - unsigned buflen; + const unsigned buflen = cp->rx_buf_sz; skb = cp->rx_skb[rx_tail]; BUG_ON(!skb); @@ -549,8 +549,7 @@ rx_status_loop: pr_debug("%s: rx slot %d status 0x%x len %d\n", dev->name, rx_tail, status, len); - buflen = cp->rx_buf_sz + NET_IP_ALIGN; - new_skb = netdev_alloc_skb(dev, buflen); + new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN); if (!new_skb) { dev->stats.rx_dropped++; goto rx_next; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 8ae72ec1445..0e2ba21d444 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -908,6 +908,7 @@ static const struct net_device_ops rtl8139_netdev_ops = { .ndo_open = rtl8139_open, .ndo_stop = rtl8139_close, .ndo_get_stats = rtl8139_get_stats, + .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = rtl8139_set_mac_address, .ndo_start_xmit = rtl8139_start_xmit, diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c155bd3ec9f..5ce7cbabd7a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1727,7 +1727,16 @@ config KS8842 tristate "Micrel KSZ8842" depends on HAS_IOMEM help - This platform driver is for Micrel KSZ8842 chip. + This platform driver is for Micrel KSZ8842 / KS8842 + 2-port ethernet switch chip (managed, VLAN, QoS). + +config KS8851 + tristate "Micrel KS8851 SPI" + depends on SPI + select MII + select CRC32 + help + SPI driver for Micrel KS8851 SPI attached network chip. 
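Editorial note on the 3c59x dump_tx_ring() hunk above: preprocessor
directives placed inside the argument list of a function-like macro such
as pr_err() have undefined behaviour, which is presumably why the
conditional is hoisted into a local variable first. A minimal sketch of
the resulting shape, reusing the driver's field names and assuming
DO_ZEROCOPY is a 0/1 macro:

	unsigned int length;

#if DO_ZEROCOPY
	length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
	length = le32_to_cpu(vp->tx_ring[i].length);
#endif
	pr_err(" %d: @%p length %8.8x status %8.8x\n",
	       i, &vp->tx_ring[i], length,
	       le32_to_cpu(vp->tx_ring[i].status));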
config VIA_RHINE tristate "VIA Rhine support" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 4b58a59f211..ead8cab3cfe 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -88,6 +88,7 @@ obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o obj-$(CONFIG_SKFP) += skfp/ obj-$(CONFIG_KS8842) += ks8842.o +obj-$(CONFIG_KS8851) += ks8851.o obj-$(CONFIG_VIA_RHINE) += via-rhine.o obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index 85a18175730..08787f5a22a 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -569,16 +569,8 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) #ifdef DEBUG_DRIVER /* dump the packet */ - { - int i; - - for (i = 0; i < 64; i++) { - if ((i % 16) == 0) - printk("\n" KERN_DEBUG); - printk ("%2.2x ", skb->data [i]); - } - printk("\n"); - } + print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE, + 16, 1, skb->data, 64, true); #endif entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-skblen) | 0xf000; diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index d6d4ab3b430..7d227cdab9f 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -158,15 +158,12 @@ module_exit(arcnet_exit); void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) { - int i; + char hdr[32]; - printk(KERN_DEBUG "%6s: skb dump (%s) follows:", dev->name, desc); - for (i = 0; i < skb->len; i++) { - if (i % 16 == 0) - printk("\n" KERN_DEBUG "[%04X] ", i); - printk("%02X ", ((u_char *) skb->data)[i]); - } - printk("\n"); + /* dump the packet */ + snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc); + print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); } EXPORT_SYMBOL(arcnet_dump_skb); @@ -184,6 +181,7 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum, int i, length; unsigned long flags = 0; static uint8_t buf[512]; + char hdr[32]; /* hw.copy_from_card expects IRQ context so take the IRQ lock to keep it single threaded */ @@ -197,14 +195,10 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum, /* if the offset[0] byte is nonzero, this is a 256-byte packet */ length = (buf[2] ? 256 : 512); - printk(KERN_DEBUG "%6s: packet dump (%s) follows:", dev->name, desc); - for (i = 0; i < length; i++) { - if (i % 16 == 0) - printk("\n" KERN_DEBUG "[%04X] ", i); - printk("%02X ", buf[i]); - } - printk("\n"); - + /* dump the packet */ + snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc); + print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, + 16, 1, buf, length, true); } #else diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index 2895db13bfa..c37ee9e6b67 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -63,3 +63,11 @@ config IXP4XX_ETH help Say Y here if you want to use built-in Ethernet ports on IXP4xx processor. + +config W90P910_ETH + tristate "Nuvoton w90p910 Ethernet support" + depends on ARM && ARCH_W90X900 + select PHYLIB + help + Say Y here if you want to use built-in Ethernet ports + on w90p910 processor. 
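The a2065 and arcnet hunks above replace hand-rolled printk() hex-dump
loops with the generic helper from lib/hexdump.c. A minimal sketch of the
call as used there; "buf" and "len" stand for the data being dumped:

	print_hex_dump(KERN_DEBUG, "pkt: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);

Each output line carries the "pkt: " prefix and the byte offset, shows 16
one-byte groups, and ends with an ASCII column, so the per-driver
formatting loops can simply be dropped.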
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile index 811a3ccd14c..303171f589e 100644 --- a/drivers/net/arm/Makefile +++ b/drivers/net/arm/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o +obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 2e7419a6119..5041d10bae9 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -1228,7 +1228,6 @@ static int at91ether_resume(struct platform_device *pdev) #endif static struct platform_driver at91ether_driver = { - .probe = at91ether_probe, .remove = __devexit_p(at91ether_remove), .suspend = at91ether_suspend, .resume = at91ether_resume, @@ -1240,7 +1239,7 @@ static struct platform_driver at91ether_driver = { static int __init at91ether_init(void) { - return platform_driver_register(&at91ether_driver); + return platform_driver_probe(&at91ether_driver, at91ether_probe); } static void __exit at91ether_exit(void) diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 6f42ad72891..3fe09876e76 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -1142,7 +1142,9 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_start_xmit = eth_xmit, .ndo_set_multicast_list = eth_set_mcast_list, .ndo_do_ioctl = eth_ioctl, - + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; static int __devinit eth_init_one(struct platform_device *pdev) diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c new file mode 100644 index 00000000000..ddd231cb54b --- /dev/null +++ b/drivers/net/arm/w90p910_ether.c @@ -0,0 +1,1105 @@ +/* + * Copyright (c) 2008-2009 Nuvoton technology corporation. + * + * Wan ZongShun <mcuos.com@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation;version 2 of the License. 
+ * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/mii.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#define DRV_MODULE_NAME "w90p910-emc" +#define DRV_MODULE_VERSION "0.1" + +/* Ethernet MAC Registers */ +#define REG_CAMCMR 0x00 +#define REG_CAMEN 0x04 +#define REG_CAMM_BASE 0x08 +#define REG_CAML_BASE 0x0c +#define REG_TXDLSA 0x88 +#define REG_RXDLSA 0x8C +#define REG_MCMDR 0x90 +#define REG_MIID 0x94 +#define REG_MIIDA 0x98 +#define REG_FFTCR 0x9C +#define REG_TSDR 0xa0 +#define REG_RSDR 0xa4 +#define REG_DMARFC 0xa8 +#define REG_MIEN 0xac +#define REG_MISTA 0xb0 +#define REG_CTXDSA 0xcc +#define REG_CTXBSA 0xd0 +#define REG_CRXDSA 0xd4 +#define REG_CRXBSA 0xd8 + +/* mac controller bit */ +#define MCMDR_RXON 0x01 +#define MCMDR_ACP (0x01 << 3) +#define MCMDR_SPCRC (0x01 << 5) +#define MCMDR_TXON (0x01 << 8) +#define MCMDR_FDUP (0x01 << 18) +#define MCMDR_ENMDC (0x01 << 19) +#define MCMDR_OPMOD (0x01 << 20) +#define SWR (0x01 << 24) + +/* cam command regiser */ +#define CAMCMR_AUP 0x01 +#define CAMCMR_AMP (0x01 << 1) +#define CAMCMR_ABP (0x01 << 2) +#define CAMCMR_CCAM (0x01 << 3) +#define CAMCMR_ECMP (0x01 << 4) +#define CAM0EN 0x01 + +/* mac mii controller bit */ +#define MDCCR (0x0a << 20) +#define PHYAD (0x01 << 8) +#define PHYWR (0x01 << 16) +#define PHYBUSY (0x01 << 17) +#define PHYPRESP (0x01 << 18) +#define CAM_ENTRY_SIZE 0x08 + +/* rx and tx status */ +#define TXDS_TXCP (0x01 << 19) +#define RXDS_CRCE (0x01 << 17) +#define RXDS_PTLE (0x01 << 19) +#define RXDS_RXGD (0x01 << 20) +#define RXDS_ALIE (0x01 << 21) +#define RXDS_RP (0x01 << 22) + +/* mac interrupt status*/ +#define MISTA_EXDEF (0x01 << 19) +#define MISTA_TXBERR (0x01 << 24) +#define MISTA_TDU (0x01 << 23) +#define MISTA_RDU (0x01 << 10) +#define MISTA_RXBERR (0x01 << 11) + +#define ENSTART 0x01 +#define ENRXINTR 0x01 +#define ENRXGD (0x01 << 4) +#define ENRXBERR (0x01 << 11) +#define ENTXINTR (0x01 << 16) +#define ENTXCP (0x01 << 18) +#define ENTXABT (0x01 << 21) +#define ENTXBERR (0x01 << 24) +#define ENMDC (0x01 << 19) +#define PHYBUSY (0x01 << 17) +#define MDCCR_VAL 0xa00000 + +/* rx and tx owner bit */ +#define RX_OWEN_DMA (0x01 << 31) +#define RX_OWEN_CPU (~(0x03 << 30)) +#define TX_OWEN_DMA (0x01 << 31) +#define TX_OWEN_CPU (~(0x01 << 31)) + +/* tx frame desc controller bit */ +#define MACTXINTEN 0x04 +#define CRCMODE 0x02 +#define PADDINGMODE 0x01 + +/* fftcr controller bit */ +#define TXTHD (0x03 << 8) +#define BLENGTH (0x01 << 20) + +/* global setting for driver */ +#define RX_DESC_SIZE 50 +#define TX_DESC_SIZE 10 +#define MAX_RBUFF_SZ 0x600 +#define MAX_TBUFF_SZ 0x600 +#define TX_TIMEOUT 50 +#define DELAY 1000 +#define CAM0 0x0 + +static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); + +struct w90p910_rxbd { + unsigned int sl; + unsigned int buffer; + unsigned int reserved; + unsigned int next; +}; + +struct w90p910_txbd { + unsigned int mode; + unsigned int buffer; + unsigned int sl; + unsigned int next; +}; + +struct recv_pdesc { + struct w90p910_rxbd desclist[RX_DESC_SIZE]; + char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; +}; + +struct tran_pdesc { + struct w90p910_txbd desclist[TX_DESC_SIZE]; + char tran_buf[RX_DESC_SIZE][MAX_TBUFF_SZ]; +}; + +struct w90p910_ether { + struct recv_pdesc *rdesc; + struct recv_pdesc *rdesc_phys; + struct tran_pdesc *tdesc; + struct tran_pdesc *tdesc_phys; + struct net_device_stats stats; + 
struct platform_device *pdev; + struct sk_buff *skb; + struct clk *clk; + struct clk *rmiiclk; + struct mii_if_info mii; + struct timer_list check_timer; + void __iomem *reg; + unsigned int rxirq; + unsigned int txirq; + unsigned int cur_tx; + unsigned int cur_rx; + unsigned int finish_tx; + unsigned int rx_packets; + unsigned int rx_bytes; + unsigned int start_tx_ptr; + unsigned int start_rx_ptr; + unsigned int linkflag; + spinlock_t lock; +}; + +static void update_linkspeed_register(struct net_device *dev, + unsigned int speed, unsigned int duplex) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (speed == SPEED_100) { + /* 100 full/half duplex */ + if (duplex == DUPLEX_FULL) { + val |= (MCMDR_OPMOD | MCMDR_FDUP); + } else { + val |= MCMDR_OPMOD; + val &= ~MCMDR_FDUP; + } + } else { + /* 10 full/half duplex */ + if (duplex == DUPLEX_FULL) { + val |= MCMDR_FDUP; + val &= ~MCMDR_OPMOD; + } else { + val &= ~(MCMDR_FDUP | MCMDR_OPMOD); + } + } + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void update_linkspeed(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int bmsr, bmcr, lpa, speed, duplex; + + pdev = ether->pdev; + + if (!mii_link_ok(ðer->mii)) { + ether->linkflag = 0x0; + netif_carrier_off(dev); + dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); + return; + } + + if (ether->linkflag == 1) + return; + + bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); + bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + if (!(bmsr & BMSR_ANEGCOMPLETE)) + return; + + lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); + + if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) + speed = SPEED_100; + else + speed = SPEED_10; + + if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) + duplex = DUPLEX_FULL; + else + duplex = DUPLEX_HALF; + + } else { + speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + } + + update_linkspeed_register(dev, speed, duplex); + + dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, + (duplex == DUPLEX_FULL) ? 
"FullDuplex" : "HalfDuplex"); + ether->linkflag = 0x01; + + netif_carrier_on(dev); +} + +static void w90p910_check_link(unsigned long dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct w90p910_ether *ether = netdev_priv(dev); + + update_linkspeed(dev); + mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); +} + +static void w90p910_write_cam(struct net_device *dev, + unsigned int x, unsigned char *pval) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int msw, lsw; + + msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; + + lsw = (pval[4] << 24) | (pval[5] << 16); + + __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); + __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); +} + +static void w90p910_init_desc(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *tdesc, *tdesc_phys; + struct w90p910_rxbd *rdesc, *rdesc_phys; + unsigned int i, j; + + ether = netdev_priv(dev); + + ether->tdesc = (struct tran_pdesc *) + dma_alloc_coherent(NULL, sizeof(struct tran_pdesc), + (dma_addr_t *) ðer->tdesc_phys, GFP_KERNEL); + + ether->rdesc = (struct recv_pdesc *) + dma_alloc_coherent(NULL, sizeof(struct recv_pdesc), + (dma_addr_t *) ðer->rdesc_phys, GFP_KERNEL); + + for (i = 0; i < TX_DESC_SIZE; i++) { + tdesc = &(ether->tdesc->desclist[i]); + + j = ((i + 1) / TX_DESC_SIZE); + + if (j != 0) { + tdesc_phys = &(ether->tdesc_phys->desclist[0]); + ether->start_tx_ptr = (unsigned int)tdesc_phys; + tdesc->next = (unsigned int)ether->start_tx_ptr; + } else { + tdesc_phys = &(ether->tdesc_phys->desclist[i+1]); + tdesc->next = (unsigned int)tdesc_phys; + } + + tdesc->buffer = (unsigned int)ether->tdesc_phys->tran_buf[i]; + tdesc->sl = 0; + tdesc->mode = 0; + } + + for (i = 0; i < RX_DESC_SIZE; i++) { + rdesc = &(ether->rdesc->desclist[i]); + + j = ((i + 1) / RX_DESC_SIZE); + + if (j != 0) { + rdesc_phys = &(ether->rdesc_phys->desclist[0]); + ether->start_rx_ptr = (unsigned int)rdesc_phys; + rdesc->next = (unsigned int)ether->start_rx_ptr; + } else { + rdesc_phys = &(ether->rdesc_phys->desclist[i+1]); + rdesc->next = (unsigned int)rdesc_phys; + } + + rdesc->sl = RX_OWEN_DMA; + rdesc->buffer = (unsigned int)ether->rdesc_phys->recv_buf[i]; + } +} + +static void w90p910_set_fifo_threshold(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = TXTHD | BLENGTH; + __raw_writel(val, ether->reg + REG_FFTCR); +} + +static void w90p910_return_default_idle(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + val |= SWR; + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_trigger_rx(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ENSTART, ether->reg + REG_RSDR); +} + +static void w90p910_trigger_tx(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ENSTART, ether->reg + REG_TSDR); +} + +static void w90p910_enable_mac_interrupt(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; + val |= ENTXBERR | ENRXBERR | ENTXABT; + + __raw_writel(val, ether->reg + REG_MIEN); +} + +static void w90p910_get_and_clear_int(struct net_device *dev, + unsigned int *val) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + *val = __raw_readl(ether->reg + REG_MISTA); + 
__raw_writel(*val, ether->reg + REG_MISTA); +} + +static void w90p910_set_global_maccmd(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_enable_cam(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + w90p910_write_cam(dev, CAM0, dev->dev_addr); + + val = __raw_readl(ether->reg + REG_CAMEN); + val |= CAM0EN; + __raw_writel(val, ether->reg + REG_CAMEN); +} + +static void w90p910_enable_cam_command(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; + __raw_writel(val, ether->reg + REG_CAMCMR); +} + +static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (enable) + val |= MCMDR_TXON; + else + val &= ~MCMDR_TXON; + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (enable) + val |= MCMDR_RXON; + else + val &= ~MCMDR_RXON; + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_set_curdest(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); + __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); +} + +static void w90p910_reset_mac(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + spin_lock(ðer->lock); + + w90p910_enable_tx(dev, 0); + w90p910_enable_rx(dev, 0); + w90p910_set_fifo_threshold(dev); + w90p910_return_default_idle(dev); + + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + + w90p910_init_desc(dev); + + dev->trans_start = jiffies; + ether->cur_tx = 0x0; + ether->finish_tx = 0x0; + ether->cur_rx = 0x0; + + w90p910_set_curdest(dev); + w90p910_enable_cam(dev); + w90p910_enable_cam_command(dev); + w90p910_enable_mac_interrupt(dev); + w90p910_enable_tx(dev, 1); + w90p910_enable_rx(dev, 1); + w90p910_trigger_tx(dev); + w90p910_trigger_rx(dev); + + dev->trans_start = jiffies; + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + spin_unlock(ðer->lock); +} + +static void w90p910_mdio_write(struct net_device *dev, + int phy_id, int reg, int data) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int val, i; + + pdev = ether->pdev; + + __raw_writel(data, ether->reg + REG_MIID); + + val = (phy_id << 0x08) | reg; + val |= PHYBUSY | PHYWR | MDCCR_VAL; + __raw_writel(val, ether->reg + REG_MIIDA); + + for (i = 0; i < DELAY; i++) { + if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) + break; + } + + if (i == DELAY) + dev_warn(&pdev->dev, "mdio write timed out\n"); +} + +static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int val, i, data; + + pdev = ether->pdev; + + val = (phy_id << 0x08) | reg; + val |= PHYBUSY | MDCCR_VAL; + __raw_writel(val, ether->reg + REG_MIIDA); + + for (i = 0; i < DELAY; i++) { + if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) + break; + } 
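/*
 * Editorial sketch, not part of the driver: w90p910_mdio_write() and
 * w90p910_mdio_read() above share the same bounded poll of the PHYBUSY
 * bit, giving the PHY at most DELAY register reads to finish. A
 * hypothetical helper capturing that pattern, reusing the driver's
 * register and bit names:
 */
static int w90p910_mdio_wait(struct w90p910_ether *ether)
{
	unsigned int i;

	for (i = 0; i < DELAY; i++)
		if (!(__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY))
			return 0;	/* PHY transaction completed */

	return -ETIMEDOUT;		/* caller decides how to report it */
}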
+ + if (i == DELAY) { + dev_warn(&pdev->dev, "mdio read timed out\n"); + data = 0xffff; + } else { + data = __raw_readl(ether->reg + REG_MIID); + } + + return data; +} + +static int set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + w90p910_write_cam(dev, CAM0, dev->dev_addr); + + return 0; +} + +static int w90p910_ether_close(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + dma_free_writecombine(NULL, sizeof(struct w90p910_rxbd), + ether->rdesc, (dma_addr_t)ether->rdesc_phys); + dma_free_writecombine(NULL, sizeof(struct w90p910_txbd), + ether->tdesc, (dma_addr_t)ether->tdesc_phys); + + netif_stop_queue(dev); + + del_timer_sync(ðer->check_timer); + clk_disable(ether->rmiiclk); + clk_disable(ether->clk); + + free_irq(ether->txirq, dev); + free_irq(ether->rxirq, dev); + + return 0; +} + +static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) +{ + struct w90p910_ether *ether; + + ether = netdev_priv(dev); + + return ðer->stats; +} + +static int w90p910_send_frame(struct net_device *dev, + unsigned char *data, int length) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *txbd; + struct platform_device *pdev; + unsigned char *buffer; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + txbd = ðer->tdesc->desclist[ether->cur_tx]; + buffer = ether->tdesc->tran_buf[ether->cur_tx]; + if (length > 1514) { + dev_err(&pdev->dev, "send data %d bytes, check it\n", length); + length = 1514; + } + + txbd->sl = length & 0xFFFF; + + memcpy(buffer, data, length); + + txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; + + w90p910_enable_tx(dev, 1); + + w90p910_trigger_tx(dev); + + ether->cur_tx = (ether->cur_tx+1) % TX_DESC_SIZE; + txbd = ðer->tdesc->desclist[ether->cur_tx]; + + dev->trans_start = jiffies; + + if (txbd->mode & TX_OWEN_DMA) + netif_stop_queue(dev); + + return 0; +} + +static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + if (!(w90p910_send_frame(dev, skb->data, skb->len))) { + ether->skb = skb; + dev_kfree_skb_irq(skb); + return 0; + } + return -1; +} + +static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *txbd; + struct platform_device *pdev; + struct tran_pdesc *tran_pdesc; + struct net_device *dev; + unsigned int cur_entry, entry, status; + + dev = (struct net_device *)dev_id; + ether = netdev_priv(dev); + pdev = ether->pdev; + + spin_lock(ðer->lock); + + w90p910_get_and_clear_int(dev, &status); + + cur_entry = __raw_readl(ether->reg + REG_CTXDSA); + + tran_pdesc = ether->tdesc_phys; + entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]); + + while (entry != cur_entry) { + txbd = ðer->tdesc->desclist[ether->finish_tx]; + + ether->finish_tx = (ether->finish_tx + 1) % TX_DESC_SIZE; + + if (txbd->sl & TXDS_TXCP) { + ether->stats.tx_packets++; + ether->stats.tx_bytes += txbd->sl & 0xFFFF; + } else { + ether->stats.tx_errors++; + } + + txbd->sl = 0x0; + txbd->mode = 0x0; + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]); + } + + if (status & MISTA_EXDEF) { + dev_err(&pdev->dev, "emc defer exceed interrupt\n"); + } else if (status & MISTA_TXBERR) { + dev_err(&pdev->dev, "emc bus error interrupt\n"); + 
w90p910_reset_mac(dev); + } else if (status & MISTA_TDU) { + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } + + spin_unlock(ðer->lock); + + return IRQ_HANDLED; +} + +static void netdev_rx(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct w90p910_rxbd *rxbd; + struct platform_device *pdev; + struct recv_pdesc *rdesc_phys; + struct sk_buff *skb; + unsigned char *data; + unsigned int length, status, val, entry; + + ether = netdev_priv(dev); + pdev = ether->pdev; + rdesc_phys = ether->rdesc_phys; + + rxbd = ðer->rdesc->desclist[ether->cur_rx]; + + do { + val = __raw_readl(ether->reg + REG_CRXDSA); + entry = (unsigned int)&rdesc_phys->desclist[ether->cur_rx]; + + if (val == entry) + break; + + status = rxbd->sl; + length = status & 0xFFFF; + + if (status & RXDS_RXGD) { + data = ether->rdesc->recv_buf[ether->cur_rx]; + skb = dev_alloc_skb(length+2); + if (!skb) { + dev_err(&pdev->dev, "get skb buffer error\n"); + ether->stats.rx_dropped++; + return; + } + + skb->dev = dev; + skb_reserve(skb, 2); + skb_put(skb, length); + skb_copy_to_linear_data(skb, data, length); + skb->protocol = eth_type_trans(skb, dev); + ether->stats.rx_packets++; + ether->stats.rx_bytes += length; + netif_rx(skb); + } else { + ether->stats.rx_errors++; + + if (status & RXDS_RP) { + dev_err(&pdev->dev, "rx runt err\n"); + ether->stats.rx_length_errors++; + } else if (status & RXDS_CRCE) { + dev_err(&pdev->dev, "rx crc err\n"); + ether->stats.rx_crc_errors++; + } + + if (status & RXDS_ALIE) { + dev_err(&pdev->dev, "rx aligment err\n"); + ether->stats.rx_frame_errors++; + } else if (status & RXDS_PTLE) { + dev_err(&pdev->dev, "rx longer err\n"); + ether->stats.rx_over_errors++; + } + } + + rxbd->sl = RX_OWEN_DMA; + rxbd->reserved = 0x0; + ether->cur_rx = (ether->cur_rx+1) % RX_DESC_SIZE; + rxbd = ðer->rdesc->desclist[ether->cur_rx]; + + dev->last_rx = jiffies; + } while (1); +} + +static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev; + struct w90p910_ether *ether; + struct platform_device *pdev; + unsigned int status; + + dev = (struct net_device *)dev_id; + ether = netdev_priv(dev); + pdev = ether->pdev; + + spin_lock(ðer->lock); + + w90p910_get_and_clear_int(dev, &status); + + if (status & MISTA_RDU) { + netdev_rx(dev); + + w90p910_trigger_rx(dev); + + spin_unlock(ðer->lock); + return IRQ_HANDLED; + } else if (status & MISTA_RXBERR) { + dev_err(&pdev->dev, "emc rx bus error\n"); + w90p910_reset_mac(dev); + } + + netdev_rx(dev); + spin_unlock(ðer->lock); + return IRQ_HANDLED; +} + +static int w90p910_ether_open(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct platform_device *pdev; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + w90p910_reset_mac(dev); + w90p910_set_fifo_threshold(dev); + w90p910_set_curdest(dev); + w90p910_enable_cam(dev); + w90p910_enable_cam_command(dev); + w90p910_enable_mac_interrupt(dev); + w90p910_set_global_maccmd(dev); + w90p910_enable_rx(dev, 1); + + ether->rx_packets = 0x0; + ether->rx_bytes = 0x0; + + if (request_irq(ether->txirq, w90p910_tx_interrupt, + 0x0, pdev->name, dev)) { + dev_err(&pdev->dev, "register irq tx failed\n"); + return -EAGAIN; + } + + if (request_irq(ether->rxirq, w90p910_rx_interrupt, + 0x0, pdev->name, dev)) { + dev_err(&pdev->dev, "register irq rx failed\n"); + return -EAGAIN; + } + + mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); + netif_start_queue(dev); + w90p910_trigger_rx(dev); + + dev_info(&pdev->dev, "%s is OPENED\n", dev->name); + + return 0; +} + +static 
void w90p910_ether_set_multicast_list(struct net_device *dev) +{ + struct w90p910_ether *ether; + unsigned int rx_mode; + + ether = netdev_priv(dev); + + if (dev->flags & IFF_PROMISC) + rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; + else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list) + rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; + else + rx_mode = CAMCMR_ECMP | CAMCMR_ABP; + __raw_writel(rx_mode, ether->reg + REG_CAMCMR); +} + +static int w90p910_ether_ioctl(struct net_device *dev, + struct ifreq *ifr, int cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return generic_mii_ioctl(ðer->mii, data, cmd, NULL); +} + +static void w90p910_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); +} + +static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_ethtool_gset(ðer->mii, cmd); +} + +static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_ethtool_sset(ðer->mii, cmd); +} + +static int w90p910_nway_reset(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_nway_restart(ðer->mii); +} + +static u32 w90p910_get_link(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_link_ok(ðer->mii); +} + +static const struct ethtool_ops w90p910_ether_ethtool_ops = { + .get_settings = w90p910_get_settings, + .set_settings = w90p910_set_settings, + .get_drvinfo = w90p910_get_drvinfo, + .nway_reset = w90p910_nway_reset, + .get_link = w90p910_get_link, +}; + +static const struct net_device_ops w90p910_ether_netdev_ops = { + .ndo_open = w90p910_ether_open, + .ndo_stop = w90p910_ether_close, + .ndo_start_xmit = w90p910_ether_start_xmit, + .ndo_get_stats = w90p910_ether_stats, + .ndo_set_multicast_list = w90p910_ether_set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_do_ioctl = w90p910_ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static void __init get_mac_address(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + char addr[6]; + + pdev = ether->pdev; + + addr[0] = 0x00; + addr[1] = 0x02; + addr[2] = 0xac; + addr[3] = 0x55; + addr[4] = 0x88; + addr[5] = 0xa8; + + if (is_valid_ether_addr(addr)) + memcpy(dev->dev_addr, &addr, 0x06); + else + dev_err(&pdev->dev, "invalid mac address\n"); +} + +static int w90p910_ether_setup(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + ether_setup(dev); + dev->netdev_ops = &w90p910_ether_netdev_ops; + dev->ethtool_ops = &w90p910_ether_ethtool_ops; + + dev->tx_queue_len = 16; + dev->dma = 0x0; + dev->watchdog_timeo = TX_TIMEOUT; + + get_mac_address(dev); + + spin_lock_init(ðer->lock); + + ether->cur_tx = 0x0; + ether->cur_rx = 0x0; + ether->finish_tx = 0x0; + ether->linkflag = 0x0; + ether->mii.phy_id = 0x01; + ether->mii.phy_id_mask = 0x1f; + ether->mii.reg_num_mask = 0x1f; + ether->mii.dev = dev; + ether->mii.mdio_read = w90p910_mdio_read; + ether->mii.mdio_write = w90p910_mdio_write; + + setup_timer(ðer->check_timer, w90p910_check_link, + (unsigned long)dev); + + return 0; +} + +static int __devinit w90p910_ether_probe(struct platform_device *pdev) +{ + struct w90p910_ether *ether; + 
struct net_device *dev; + struct resource *res; + int error; + + dev = alloc_etherdev(sizeof(struct w90p910_ether)); + if (!dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "failed to get I/O memory\n"); + error = -ENXIO; + goto failed_free; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (res == NULL) { + dev_err(&pdev->dev, "failed to request I/O memory\n"); + error = -EBUSY; + goto failed_free; + } + + ether = netdev_priv(dev); + + ether->reg = ioremap(res->start, resource_size(res)); + if (ether->reg == NULL) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + error = -ENXIO; + goto failed_free_mem; + } + + ether->txirq = platform_get_irq(pdev, 0); + if (ether->txirq < 0) { + dev_err(&pdev->dev, "failed to get ether tx irq\n"); + error = -ENXIO; + goto failed_free_io; + } + + ether->rxirq = platform_get_irq(pdev, 1); + if (ether->rxirq < 0) { + dev_err(&pdev->dev, "failed to get ether rx irq\n"); + error = -ENXIO; + goto failed_free_txirq; + } + + platform_set_drvdata(pdev, dev); + + ether->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(ether->clk)) { + dev_err(&pdev->dev, "failed to get ether clock\n"); + error = PTR_ERR(ether->clk); + goto failed_free_rxirq; + } + + ether->rmiiclk = clk_get(&pdev->dev, "RMII"); + if (IS_ERR(ether->rmiiclk)) { + dev_err(&pdev->dev, "failed to get ether clock\n"); + error = PTR_ERR(ether->rmiiclk); + goto failed_put_clk; + } + + ether->pdev = pdev; + + w90p910_ether_setup(dev); + + error = register_netdev(dev); + if (error != 0) { + dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); + error = -ENODEV; + goto failed_put_rmiiclk; + } + + return 0; +failed_put_rmiiclk: + clk_put(ether->rmiiclk); +failed_put_clk: + clk_put(ether->clk); +failed_free_rxirq: + free_irq(ether->rxirq, pdev); + platform_set_drvdata(pdev, NULL); +failed_free_txirq: + free_irq(ether->txirq, pdev); +failed_free_io: + iounmap(ether->reg); +failed_free_mem: + release_mem_region(res->start, resource_size(res)); +failed_free: + free_netdev(dev); + return error; +} + +static int __devexit w90p910_ether_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct w90p910_ether *ether = netdev_priv(dev); + + unregister_netdev(dev); + clk_put(ether->rmiiclk); + clk_put(ether->clk); + del_timer_sync(ðer->check_timer); + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +static struct platform_driver w90p910_ether_driver = { + .probe = w90p910_ether_probe, + .remove = __devexit_p(w90p910_ether_remove), + .driver = { + .name = "nuc900-emc", + .owner = THIS_MODULE, + }, +}; + +static int __init w90p910_ether_init(void) +{ + return platform_driver_register(&w90p910_ether_driver); +} + +static void __exit w90p910_ether_exit(void) +{ + platform_driver_unregister(&w90p910_ether_driver); +} + +module_init(w90p910_ether_init); +module_exit(w90p910_ether_exit); + +MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); +MODULE_DESCRIPTION("w90p910 MAC driver!"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:nuc900-emc"); + diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 18b566ad4fd..cf30e278f18 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -318,7 +318,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) pos3 = mca_read_stored_pos( slot, 3 ); pos4 = mca_read_stored_pos( slot, 4 ); - for (l_i = 0; l_i < 0x09; l_i++) + for (l_i = 0; l_i < 8; l_i++) if (( pos3 & 0x07) == 
at1700_ioaddr_pattern[l_i]) break; ioaddr = at1700_mca_probe_list[l_i]; diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h index e1658ef3fcd..2a1120ad2e7 100644 --- a/drivers/net/atl1c/atl1c.h +++ b/drivers/net/atl1c/atl1c.h @@ -188,14 +188,14 @@ struct atl1c_tpd_ext_desc { #define RRS_HDS_TYPE_DATA 2 #define RRS_IS_NO_HDS_TYPE(flag) \ - (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0) + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0) #define RRS_IS_HDS_HEAD(flag) \ - (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ RRS_HDS_TYPE_HEAD) #define RRS_IS_HDS_DATA(flag) \ - (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ RRS_HDS_TYPE_DATA) /* rrs word 3 bit 0:31 */ @@ -245,7 +245,7 @@ struct atl1c_tpd_ext_desc { #define RRS_PACKET_TYPE_802_3 1 #define RRS_PACKET_TYPE_ETH 0 #define RRS_PACKET_IS_ETH(word) \ - (((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \ + ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \ RRS_PACKET_TYPE_ETH) #define RRS_RXD_IS_VALID(word) \ ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c index e4afbd628c2..00d11b480af 100644 --- a/drivers/net/atl1c/atl1c_ethtool.c +++ b/drivers/net/atl1c/atl1c_ethtool.c @@ -232,11 +232,11 @@ static void atl1c_get_drvinfo(struct net_device *netdev, { struct atl1c_adapter *adapter = netdev_priv(netdev); - strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); - strncpy(drvinfo->version, atl1c_driver_version, + strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1c_driver_version, sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; @@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + return 0; } diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index cd547a205fb..a383122679d 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -1689,7 +1689,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, if (likely(RRS_RXD_IS_VALID(rrs->word3))) { rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & RRS_RX_RFD_CNT_MASK; - if (unlikely(rfd_num) != 1) + if (unlikely(rfd_num != 1)) /* TODO support mul rfd*/ if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index 619c6583e1a..4003955d7a9 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c @@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + return 0; } diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 94d7325caf4..8bca12f7139 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ 
-3378,11 +3378,11 @@ static void atl1_get_drvinfo(struct net_device *netdev, { struct atl1_adapter *adapter = netdev_priv(netdev); - strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); - strncpy(drvinfo->version, ATLX_DRIVER_VERSION, + strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ATLX_DRIVER_VERSION, sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->eedump_len = ATL1_EEDUMP_LEN; } diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index c734b1983ec..204db961029 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@ -2071,7 +2071,7 @@ static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; - if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST)) + if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 36d4d377ec2..bafca672ea7 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) int rc = NETDEV_TX_OK; dma_addr_t mapping; u32 len, entry, ctrl; + unsigned long flags; len = skb->len; - spin_lock_irq(&bp->lock); + spin_lock_irqsave(&bp->lock, flags); /* This is a hard error, log it. */ if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { @@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; out_unlock: - spin_unlock_irq(&bp->lock); + spin_unlock_irqrestore(&bp->lock, flags); return rc; diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index f703758f0a6..5b4bf3d2cdc 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) #define BE_MAX_LRO_DESCRIPTORS 16 -#define BE_MAX_FRAGS_PER_FRAME 16 +#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) struct be_dma_mem { void *va; diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9592f22e4c8..cccc5419ad7 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) return -EINVAL; adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; - if (adapter->max_rx_coal > MAX_SKB_FRAGS) - adapter->max_rx_coal = MAX_SKB_FRAGS - 1; + if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME) + adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; /* if AIC is being turned on now, start with an EQD of 0 */ if (rx_eq->enable_aic == 0 && diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index b02e805c1db..29c33c709c6 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -55,6 +55,10 @@ #define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */ #define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26 +/********* ISR0 Register offset **********/ +#define CEV_ISR0_OFFSET 0xC18 +#define CEV_ISR_SIZE 4 + /********* Event Q door bell *************/ #define DB_EQ_OFFSET DB_CQ_OFFSET #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ 
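The RRS_* macro changes in atl1c.h above are a pure operator-precedence fix: in C, == binds more tightly than &, so the unparenthesized form masks against the result of the comparison instead of comparing the masked field. The short standalone program below is only an illustrative sketch of that failure mode, not driver code; the TYPE_SHIFT/TYPE_MASK/TYPE_HEAD names are invented for the example.

/*
 * Illustrative sketch: why "(flag >> SHIFT) & MASK == VAL" is wrong.
 * Because == has higher precedence than &, it parses as
 * "(flag >> SHIFT) & (MASK == VAL)", which collapses to "& 0" or "& 1".
 */
#include <stdio.h>

#define TYPE_SHIFT  4
#define TYPE_MASK   0x3          /* hypothetical 2-bit type field */
#define TYPE_HEAD   1

/* Buggy form, mirroring the pre-patch atl1c macros */
#define IS_HEAD_BUGGY(flag) \
        (((flag) >> TYPE_SHIFT) & TYPE_MASK == TYPE_HEAD)

/* Fixed form, with the parentheses the patch adds */
#define IS_HEAD_FIXED(flag) \
        ((((flag) >> TYPE_SHIFT) & TYPE_MASK) == TYPE_HEAD)

int main(void)
{
        unsigned int flag = TYPE_HEAD << TYPE_SHIFT;  /* field really is HEAD */

        /* TYPE_MASK == TYPE_HEAD is 0, so the buggy macro is always false. */
        printf("head frame:     buggy=%d fixed=%d\n",
               IS_HEAD_BUGGY(flag), IS_HEAD_FIXED(flag));

        flag = 2u << TYPE_SHIFT;                      /* field is not HEAD */
        printf("non-head frame: buggy=%d fixed=%d\n",
               IS_HEAD_BUGGY(flag), IS_HEAD_FIXED(flag));
        return 0;
}

gcc's -Wparentheses (enabled by -Wall) warns about a comparison used as an operand of &, which catches this class of bug at compile time.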
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 66c10c87f51..dea3155688b 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -666,8 +666,8 @@ static void skb_fill_rx_data(struct be_adapter *adapter, { struct be_queue_info *rxq = &adapter->rx_obj.q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, num_rcvd; - u32 pktsize, hdr_len, curr_frag_len; + u16 rxq_idx, i, num_rcvd, j; + u32 pktsize, hdr_len, curr_frag_len, size; u8 *start; rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); @@ -708,23 +708,34 @@ static void skb_fill_rx_data(struct be_adapter *adapter, } /* More frags present for this completion */ - pktsize -= curr_frag_len; /* account for above copied frag */ - for (i = 1; i < num_rcvd; i++) { + size = pktsize; + for (i = 1, j = 0; i < num_rcvd; i++) { + size -= curr_frag_len; index_inc(&rxq_idx, rxq->len); page_info = get_rx_page_info(adapter, rxq_idx); - curr_frag_len = min(pktsize, rx_frag_size); + curr_frag_len = min(size, rx_frag_size); + + /* Coalesce all frags from the same physical page in one slot */ + if (page_info->page_offset == 0) { + /* Fresh page */ + j++; + skb_shinfo(skb)->frags[j].page = page_info->page; + skb_shinfo(skb)->frags[j].page_offset = + page_info->page_offset; + skb_shinfo(skb)->frags[j].size = 0; + skb_shinfo(skb)->nr_frags++; + } else { + put_page(page_info->page); + } - skb_shinfo(skb)->frags[i].page = page_info->page; - skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; - skb_shinfo(skb)->frags[i].size = curr_frag_len; + skb_shinfo(skb)->frags[j].size += curr_frag_len; skb->len += curr_frag_len; skb->data_len += curr_frag_len; - skb_shinfo(skb)->nr_frags++; - pktsize -= curr_frag_len; memset(page_info, 0, sizeof(*page_info)); } + BUG_ON(j > MAX_SKB_FRAGS); done: be_rx_stats_update(adapter, pktsize, num_rcvd); @@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; struct be_queue_info *rxq = &adapter->rx_obj.q; u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; - u16 i, rxq_idx = 0, vid; + u16 i, rxq_idx = 0, vid, j; num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); @@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); remaining = pkt_size; - for (i = 0; i < num_rcvd; i++) { + for (i = 0, j = -1; i < num_rcvd; i++) { page_info = get_rx_page_info(adapter, rxq_idx); curr_frag_len = min(remaining, rx_frag_size); - rx_frags[i].page = page_info->page; - rx_frags[i].page_offset = page_info->page_offset; - rx_frags[i].size = curr_frag_len; - remaining -= curr_frag_len; + /* Coalesce all frags from the same physical page in one slot */ + if (i == 0 || page_info->page_offset == 0) { + /* First frag or Fresh page */ + j++; + rx_frags[j].page = page_info->page; + rx_frags[j].page_offset = page_info->page_offset; + rx_frags[j].size = 0; + } else { + put_page(page_info->page); + } + rx_frags[j].size += curr_frag_len; + remaining -= curr_frag_len; index_inc(&rxq_idx, rxq->len); - memset(page_info, 0, sizeof(*page_info)); } + BUG_ON(j > MAX_SKB_FRAGS); if (likely(!vlanf)) { lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, @@ -1255,15 +1274,17 @@ static irqreturn_t be_intx(int irq, void *dev) { struct be_adapter *adapter = dev; struct be_ctrl_info *ctrl = &adapter->ctrl; - int rx, tx; + int isr; - 
tx = event_handle(ctrl, &adapter->tx_eq); - rx = event_handle(ctrl, &adapter->rx_eq); + isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + + ctrl->pci_func * CEV_ISR_SIZE); + if (!isr) + return IRQ_NONE; - if (rx || tx) - return IRQ_HANDLED; - else - return IRQ_NONE; + event_handle(ctrl, &adapter->tx_eq); + event_handle(ctrl, &adapter->rx_eq); + + return IRQ_HANDLED; } static irqreturn_t be_msix_rx(int irq, void *dev) diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 9578a3dfac0..206144f2470 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -428,10 +428,11 @@ bmac_init_phy(struct net_device *dev) printk(KERN_DEBUG "phy registers:"); for (addr = 0; addr < 32; ++addr) { if ((addr & 7) == 0) - printk("\n" KERN_DEBUG); - printk(" %.4x", bmac_mif_read(dev, addr)); + printk(KERN_DEBUG); + printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr)); } - printk("\n"); + printk(KERN_CONT "\n"); + if (bp->is_bmac_plus) { unsigned int capable, ctrl; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b70cc99962f..06b901152d4 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -399,9 +399,11 @@ static int bnx2_unregister_cnic(struct net_device *dev) struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + mutex_lock(&bp->cnic_lock); cp->drv_state = 0; bnapi->cnic_present = 0; rcu_assign_pointer(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_lock); synchronize_rcu(); return 0; } @@ -429,13 +431,13 @@ bnx2_cnic_stop(struct bnx2 *bp) struct cnic_ops *c_ops; struct cnic_ctl_info info; - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); + mutex_lock(&bp->cnic_lock); + c_ops = bp->cnic_ops; if (c_ops) { info.cmd = CNIC_CTL_STOP_CMD; c_ops->cnic_ctl(bp->cnic_data, &info); } - rcu_read_unlock(); + mutex_unlock(&bp->cnic_lock); } static void @@ -444,8 +446,8 @@ bnx2_cnic_start(struct bnx2 *bp) struct cnic_ops *c_ops; struct cnic_ctl_info info; - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); + mutex_lock(&bp->cnic_lock); + c_ops = bp->cnic_ops; if (c_ops) { if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; @@ -455,7 +457,7 @@ bnx2_cnic_start(struct bnx2 *bp) info.cmd = CNIC_CTL_START_CMD; c_ops->cnic_ctl(bp->cnic_data, &info); } - rcu_read_unlock(); + mutex_unlock(&bp->cnic_lock); } #else @@ -7663,6 +7665,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) spin_lock_init(&bp->phy_lock); spin_lock_init(&bp->indirect_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_lock); +#endif INIT_WORK(&bp->reset_task, bnx2_reset_task); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index f1edfaa9e56..a4f12fd0ecd 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6902,6 +6902,7 @@ struct bnx2 { u32 idle_chk_status_idx; #ifdef BCM_CNIC + struct mutex cnic_lock; struct cnic_eth_dev cnic_eth_dev; #endif diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 8678457849f..85a737c5c23 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h @@ -902,6 +902,8 @@ struct bnx2x { u16 rx_quick_cons_trip; u16 rx_ticks_int; u16 rx_ticks; +/* Maximal coalescing timeout in us */ +#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) u32 lin_cnt; diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c index ed648acef7c..2ee581a2cde 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x_link.c @@ -4212,13 +4212,14 @@ static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port) u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, 
u8 driver_loaded, u8 *version, u16 len) { - struct bnx2x *bp = params->bp; + struct bnx2x *bp; u32 ext_phy_type = 0; u32 spirom_ver = 0; u8 status = 0 ; if (version == NULL || params == NULL) return -EINVAL; + bp = params->bp; spirom_ver = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index fbf1352e9c1..c36a5f33739 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -484,8 +484,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp) mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); mark = ((mark + 0x3) & ~0x3); - printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark); + printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark); + printk(KERN_ERR PFX); for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + @@ -500,7 +501,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp) data[8] = 0x0; printk(KERN_CONT "%s", (char *)data); } - printk("\n" KERN_ERR PFX "end of fw dump\n"); + printk(KERN_ERR PFX "end of fw dump\n"); } static void bnx2x_panic_dump(struct bnx2x *bp) @@ -4434,7 +4435,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) REG_WR16(bp, BAR_USTRORM_INTMEM + USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, U_SB_ETH_RX_CQ_INDEX), - bp->rx_ticks ? 0 : 1); + (bp->rx_ticks/12) ? 0 : 1); /* HC_INDEX_C_ETH_TX_CQ_CONS */ REG_WR8(bp, BAR_CSTRORM_INTMEM + @@ -4444,7 +4445,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) REG_WR16(bp, BAR_CSTRORM_INTMEM + CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, C_SB_ETH_TX_CQ_INDEX), - bp->tx_ticks ? 0 : 1); + (bp->tx_ticks/12) ? 0 : 1); } } @@ -7354,7 +7355,7 @@ static void bnx2x_reset_task(struct work_struct *work) #ifdef BNX2X_STOP_ON_ERROR BNX2X_ERR("reset task called but STOP_ON_ERROR defined" " so reset not done to allow debug dump,\n" - KERN_ERR " you will need to reboot when done\n"); + " you will need to reboot when done\n"); return; #endif @@ -8637,6 +8638,14 @@ static int bnx2x_nway_reset(struct net_device *dev) return 0; } +static u32 +bnx2x_get_link(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->link_vars.link_up; +} + static int bnx2x_get_eeprom_len(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -9061,12 +9070,12 @@ static int bnx2x_set_coalesce(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); bp->rx_ticks = (u16) coal->rx_coalesce_usecs; - if (bp->rx_ticks > 3000) - bp->rx_ticks = 3000; + if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; bp->tx_ticks = (u16) coal->tx_coalesce_usecs; - if (bp->tx_ticks > 0x3000) - bp->tx_ticks = 0x3000; + if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; if (netif_running(dev)) bnx2x_update_coalesce(bp); @@ -10034,7 +10043,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = { .get_msglevel = bnx2x_get_msglevel, .set_msglevel = bnx2x_set_msglevel, .nway_reset = bnx2x_nway_reset, - .get_link = ethtool_op_get_link, + .get_link = bnx2x_get_link, .get_eeprom_len = bnx2x_get_eeprom_len, .get_eeprom = bnx2x_get_eeprom, .set_eeprom = bnx2x_set_eeprom, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d927f71af8a..aa1be1fecee 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1459,8 +1459,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * ether type (eg ARPHRD_ETHER and 
ARPHRD_INFINIBAND) share the same bond */ if (bond->slave_cnt == 0) { - if (slave_dev->type != ARPHRD_ETHER) - bond_setup_by_slave(bond_dev, slave_dev); + if (bond_dev->type != slave_dev->type) { + dev_close(bond_dev); + pr_debug("%s: change device type from %d to %d\n", + bond_dev->name, bond_dev->type, slave_dev->type); + if (slave_dev->type != ARPHRD_ETHER) + bond_setup_by_slave(bond_dev, slave_dev); + else + ether_setup(bond_dev); + dev_open(bond_dev); + } } else if (bond_dev->type != slave_dev->type) { pr_err(DRV_NAME ": %s ether type (%d) is different " "from other slaves (%d), can not enslave it.\n", diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 574daddc21b..e1a4f821423 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -346,7 +346,7 @@ void can_restart(unsigned long data) skb = dev_alloc_skb(sizeof(struct can_frame)); if (skb == NULL) { err = -ENOMEM; - goto out; + goto restart; } skb->dev = dev; skb->protocol = htons(ETH_P_CAN); @@ -361,13 +361,13 @@ void can_restart(unsigned long data) stats->rx_packets++; stats->rx_bytes += cf->can_dlc; +restart: dev_dbg(dev->dev.parent, "restarted\n"); priv->can_stats.restarts++; /* Now restart the device */ err = priv->do_set_mode(dev, CAN_MODE_START); -out: netif_carrier_on(dev); if (err) dev_err(dev->dev.parent, "Error %d during restart", err); @@ -473,6 +473,10 @@ int open_candev(struct net_device *dev) return -EINVAL; } + /* Switch carrier on if device was stopped while in bus-off state */ + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); return 0; @@ -607,11 +611,18 @@ nla_put_failure: return -EMSGSIZE; } +static int can_newlink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + return -EOPNOTSUPP; +} + static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, + .newlink = can_newlink, .changelink = can_changelink, .fill_info = can_fill_info, .fill_xstats = can_fill_xstats, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 571f133a8fe..08ebee79d8a 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -63,7 +63,6 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/dev.h> #include "sja1000.h" diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 4d1515f45ba..74c342959b7 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) return NULL; } +static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) +{ + atomic_inc(&ulp_ops->ref_count); +} + +static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) +{ + atomic_dec(&ulp_ops->ref_count); +} + static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) { struct cnic_local *cp = dev->cnic_priv; @@ -227,7 +237,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, } rcu_read_lock(); - ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); + ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); if (ulp_ops) ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); rcu_read_unlock(); @@ -319,6 +329,20 @@ static int cnic_abort_prep(struct cnic_sock *csk) return 0; } +static void cnic_uio_stop(void) +{ + struct cnic_dev *dev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = 
dev->cnic_priv; + + if (cp->cnic_uinfo) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + } + read_unlock(&cnic_dev_lock); +} + int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) { struct cnic_dev *dev; @@ -344,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) } read_unlock(&cnic_dev_lock); + atomic_set(&ulp_ops->ref_count, 0); rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); mutex_unlock(&cnic_lock); @@ -365,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) int cnic_unregister_driver(int ulp_type) { struct cnic_dev *dev; + struct cnic_ulp_ops *ulp_ops; + int i = 0; if (ulp_type >= MAX_CNIC_ULP_TYPE) { printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", @@ -372,7 +399,8 @@ int cnic_unregister_driver(int ulp_type) return -EINVAL; } mutex_lock(&cnic_lock); - if (!cnic_ulp_tbl[ulp_type]) { + ulp_ops = cnic_ulp_tbl[ulp_type]; + if (!ulp_ops) { printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not " "been registered\n", ulp_type); goto out_unlock; @@ -390,10 +418,21 @@ int cnic_unregister_driver(int ulp_type) } read_unlock(&cnic_dev_lock); + if (ulp_type == CNIC_ULP_ISCSI) + cnic_uio_stop(); + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); mutex_unlock(&cnic_lock); synchronize_rcu(); + while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { + msleep(100); + i++; + } + + if (atomic_read(&ulp_ops->ref_count) != 0) + printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go" + " to zero.\n", dev->netdev->name); return 0; out_unlock: @@ -449,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver); static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) { struct cnic_local *cp = dev->cnic_priv; + int i = 0; if (ulp_type >= MAX_CNIC_ULP_TYPE) { printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", @@ -469,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) synchronize_rcu(); + while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && + i < 20) { + msleep(100); + i++; + } + if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) + printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call" + " to complete.\n", dev->netdev->name); + return 0; } EXPORT_SYMBOL(cnic_unregister_driver); @@ -632,7 +681,6 @@ static void cnic_free_resc(struct cnic_dev *dev) int i = 0; if (cp->cnic_uinfo) { - cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); while (cp->uio_dev != -1 && i < 15) { msleep(100); i++; @@ -1057,18 +1105,26 @@ static void cnic_ulp_stop(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; int if_type; - rcu_read_lock(); + if (cp->cnic_uinfo) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { struct cnic_ulp_ops *ulp_ops; - ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); - if (!ulp_ops) + mutex_lock(&cnic_lock); + ulp_ops = cp->ulp_ops[if_type]; + if (!ulp_ops) { + mutex_unlock(&cnic_lock); continue; + } + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) ulp_ops->cnic_stop(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); } - rcu_read_unlock(); } static void cnic_ulp_start(struct cnic_dev *dev) @@ -1076,18 +1132,23 @@ static void cnic_ulp_start(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; int if_type; - rcu_read_lock(); for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { struct cnic_ulp_ops *ulp_ops; - 
ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); - if (!ulp_ops || !ulp_ops->cnic_start) + mutex_lock(&cnic_lock); + ulp_ops = cp->ulp_ops[if_type]; + if (!ulp_ops || !ulp_ops->cnic_start) { + mutex_unlock(&cnic_lock); continue; + } + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) ulp_ops->cnic_start(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); } - rcu_read_unlock(); } static int cnic_ctl(void *data, struct cnic_ctl_info *info) @@ -1097,22 +1158,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) switch (info->cmd) { case CNIC_CTL_STOP_CMD: cnic_hold(dev); - mutex_lock(&cnic_lock); cnic_ulp_stop(dev); cnic_stop_hw(dev); - mutex_unlock(&cnic_lock); cnic_put(dev); break; case CNIC_CTL_START_CMD: cnic_hold(dev); - mutex_lock(&cnic_lock); if (!cnic_start_hw(dev)) cnic_ulp_start(dev); - mutex_unlock(&cnic_lock); cnic_put(dev); break; default: @@ -1126,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev) int i; struct cnic_local *cp = dev->cnic_priv; - rcu_read_lock(); for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { struct cnic_ulp_ops *ulp_ops; - ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); - if (!ulp_ops || !ulp_ops->cnic_init) + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl[i]; + if (!ulp_ops || !ulp_ops->cnic_init) { + mutex_unlock(&cnic_lock); continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) ulp_ops->cnic_init(dev); + ulp_put(ulp_ops); } - rcu_read_unlock(); } static void cnic_ulp_exit(struct cnic_dev *dev) @@ -1146,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev) int i; struct cnic_local *cp = dev->cnic_priv; - rcu_read_lock(); for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { struct cnic_ulp_ops *ulp_ops; - ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); - if (!ulp_ops || !ulp_ops->cnic_exit) + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl[i]; + if (!ulp_ops || !ulp_ops->cnic_exit) { + mutex_unlock(&cnic_lock); continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) ulp_ops->cnic_exit(dev); + ulp_put(ulp_ops); } - rcu_read_unlock(); } static int cnic_cm_offload_pg(struct cnic_sock *csk) @@ -2374,21 +2439,45 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) return 0; } -static int cnic_start_hw(struct cnic_dev *dev) +static int cnic_register_netdev(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; struct cnic_eth_dev *ethdev = cp->ethdev; int err; - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EALREADY; + if (!ethdev) + return -ENODEV; + + if (ethdev->drv_state & CNIC_DRV_STATE_REGD) + return 0; err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); - if (err) { + if (err) printk(KERN_ERR PFX "%s: register_cnic failed\n", dev->netdev->name); - goto err2; - } + + return err; +} + +static void cnic_unregister_netdev(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!ethdev) + return; + + ethdev->drv_unregister_cnic(dev->netdev); +} + +static int cnic_start_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EALREADY; dev->regview = ethdev->io_base; cp->chip_id = ethdev->chip_id; @@ -2419,18 +2508,13 @@ static int cnic_start_hw(struct cnic_dev *dev) return 0; err1: - 
ethdev->drv_unregister_cnic(dev->netdev); cp->free_resc(dev); pci_dev_put(dev->pcidev); -err2: return err; } static void cnic_stop_bnx2_hw(struct cnic_dev *dev) { - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - cnic_disable_bnx2_int_sync(dev); cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); @@ -2442,8 +2526,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev) cnic_setup_5709_context(dev, 0); cnic_free_irq(dev); - ethdev->drv_unregister_cnic(dev->netdev); - cnic_free_resc(dev); } @@ -2524,7 +2606,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) probe = symbol_get(bnx2_cnic_probe); if (probe) { ethdev = (*probe)(dev); - symbol_put_addr(probe); + symbol_put(bnx2_cnic_probe); } if (!ethdev) return NULL; @@ -2627,10 +2709,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, else if (event == NETDEV_UNREGISTER) cnic_ulp_exit(dev); else if (event == NETDEV_UP) { - mutex_lock(&cnic_lock); + if (cnic_register_netdev(dev) != 0) { + cnic_put(dev); + goto done; + } if (!cnic_start_hw(dev)) cnic_ulp_start(dev); - mutex_unlock(&cnic_lock); } rcu_read_lock(); @@ -2649,10 +2733,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, rcu_read_unlock(); if (event == NETDEV_GOING_DOWN) { - mutex_lock(&cnic_lock); cnic_ulp_stop(dev); cnic_stop_hw(dev); - mutex_unlock(&cnic_lock); + cnic_unregister_netdev(dev); } else if (event == NETDEV_UNREGISTER) { write_lock(&cnic_dev_lock); list_del_init(&dev->list); @@ -2684,6 +2767,7 @@ static void cnic_release(void) } cnic_ulp_exit(dev); + cnic_unregister_netdev(dev); list_del_init(&dev->list); cnic_free_dev(dev); } diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h index 5192d4a9df5..a94b302bb46 100644 --- a/drivers/net/cnic.h +++ b/drivers/net/cnic.h @@ -176,6 +176,7 @@ struct cnic_local { unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; #define ULP_F_INIT 0 #define ULP_F_START 1 +#define ULP_F_CALL_PENDING 2 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; /* protected by ulp_lock */ diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index d1bce27ee99..a49235739ee 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -290,6 +290,7 @@ struct cnic_ulp_ops { void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, char *data, u16 data_size); struct module *owner; + atomic_t ref_count; }; extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 58afafbd3b9..fd5e32cbcb8 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = { .ndo_start_xmit = cpmac_start_xmit, .ndo_tx_timeout = cpmac_tx_timeout, .ndo_set_multicast_list = cpmac_set_multicast_list, - .ndo_so_ioctl = cpmac_ioctl, + .ndo_do_ioctl = cpmac_ioctl, .ndo_set_config = cpmac_config, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 3eee666a9cd..55445f980f9 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -1524,6 +1524,7 @@ static void net_timeout(struct net_device *dev) static int net_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); + unsigned long flags; if (net_debug > 3) { printk("%s: sent %d byte packet of type %x\n", @@ -1535,7 +1536,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev) ask the chip to start transmitting before the 
whole packet has been completely uploaded. */ - spin_lock_irq(&lp->lock); + spin_lock_irqsave(&lp->lock, flags); netif_stop_queue(dev); /* initiate a transmit sequence */ @@ -1549,13 +1550,13 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev) * we're waiting for TxOk, so return 1 and requeue this packet. */ - spin_unlock_irq(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); if (net_debug) printk("cs89x0: Tx buffer not free!\n"); return NETDEV_TX_BUSY; } /* Write the contents of the packet */ writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); - spin_unlock_irq(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); lp->stats.tx_bytes += skb->len; dev->trans_start = jiffies; dev_kfree_skb (skb); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 538dda4422d..fb5df5c6203 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -642,8 +642,7 @@ static int setup_sge_qsets(struct adapter *adap) struct port_info *pi = netdev_priv(dev); pi->qs = &adap->sge.qs[pi->first_qset]; - for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; - ++j, ++qset_idx) { + for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO); err = t3_sge_alloc_qset(adap, qset_idx, 1, (adap->flags & USING_MSIX) ? qset_idx + 1 : diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2df8fb0af70..12fd446f989 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -1820,11 +1820,19 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) struct device *emac_dev = &priv->ndev->dev; struct sockaddr *sa = addr; + if (!is_valid_ether_addr(sa->sa_data)) + return -EINVAL; + /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); - memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len); memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); - emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); + + /* If the interface is down - rxch is NULL. */ + /* MAC address is configured only after the interface is enabled. 
*/ + if (netif_running(ndev)) { + memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len); + emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); + } if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n", diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 895d72143ee..4b6a219fece 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -268,8 +268,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); if (np->coalesce) - printk(KERN_INFO "rx_coalesce:\t%d packets\n" - KERN_INFO "rx_timeout: \t%d ns\n", + printk(KERN_INFO + "rx_coalesce:\t%d packets\n" + "rx_timeout: \t%d ns\n", np->rx_coalesce, np->rx_timeout*640); if (np->vlan) printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); @@ -1522,9 +1523,9 @@ mii_get_media (struct net_device *dev) printk (KERN_INFO "Operating at 10 Mbps, "); } if (bmcr & MII_BMCR_DUPLEX_MODE) { - printk ("Full duplex\n"); + printk (KERN_CONT "Full duplex\n"); } else { - printk ("Half duplex\n"); + printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) @@ -1614,9 +1615,9 @@ mii_set_media (struct net_device *dev) } if (np->full_duplex) { bmcr |= MII_BMCR_DUPLEX_MODE; - printk ("Full duplex\n"); + printk (KERN_CONT "Full duplex\n"); } else { - printk ("Half duplex\n"); + printk (KERN_CONT "Half duplex\n"); } #if 0 /* Set 1000BaseT Master/Slave setting */ @@ -1669,9 +1670,9 @@ mii_get_media_pcs (struct net_device *dev) __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); printk (KERN_INFO "Operating at 1000 Mbps, "); if (bmcr & MII_BMCR_DUPLEX_MODE) { - printk ("Full duplex\n"); + printk (KERN_CONT "Full duplex\n"); } else { - printk ("Half duplex\n"); + printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) diff --git a/drivers/net/e100.c b/drivers/net/e100.c index efa680f4b8d..3a6735dc9f6 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1897,6 +1897,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, if (ioread8(&nic->csr->scb.status) & rus_no_res) nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); return -ENODATA; } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 5e3356f8eb5..5b8cbdb4b52 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->skb) { + if (buffer_info->dma) { pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); + } + + buffer_info->dma = 0; + if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } @@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); /* !EOP means multiple descriptors were used to store a single @@ -4222,6 +4227,7 @@ map_skb: pci_unmap_single(pdev, buffer_info->dma, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; break; /* while !buffer_info->skb */ } @@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, netif_device_detach(netdev); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + if (netif_running(netdev)) e1000_down(adapter); 
pci_disable_device(pdev); diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 8890c97e112..c0f185beb8b 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -238,6 +238,7 @@ #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ /* Constants used to interpret the masked PCI-X bus speed. */ @@ -575,6 +576,8 @@ #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + /* NVM Control */ #define E1000_EECD_SK 0x00000001 /* NVM Clock */ #define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 163c1c0cfee..fd44d9f9076 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -215,6 +215,7 @@ enum e1e_registers { E1000_SWSM = 0x05B50, /* SW Semaphore */ E1000_FWSM = 0x05B54, /* FW Semaphore */ E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ + E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */ E1000_HICR = 0x08F00, /* Host Interface Control */ }; @@ -302,6 +303,9 @@ enum e1e_registers { #define E1000_KMRNCTRLSTA_REN 0x00200000 #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E +#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400 #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 9e23f50fb9c..99df2abf82a 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -338,9 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u32 gfpreg; - u32 sector_base_addr; - u32 sector_end_addr; + u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; /* Can't read flash registers if the register set isn't mapped. */ @@ -446,6 +444,95 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) return 0; } +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. 
If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + goto out; + } + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg(hw, "Error configuring flow control\n"); + +out: + return ret_val; +} + static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -490,8 +577,8 @@ static DEFINE_MUTEX(nvm_mutex); **/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { - u32 extcnf_ctrl; - u32 timeout = PHY_CFG_TIMEOUT; + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; might_sleep(); @@ -499,28 +586,46 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; - if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) { - extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; - ew32(EXTCNF_CTRL, extcnf_ctrl); + mdelay(1); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = PHY_CFG_TIMEOUT * 2; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; - extcnf_ctrl = er32(EXTCNF_CTRL); - if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) - break; - } mdelay(1); timeout--; } if (!timeout) { - hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); + hw_dbg(hw, "Failed to acquire the semaphore.\n"); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); - mutex_unlock(&nvm_mutex); - return -E1000_ERR_CONFIG; + ret_val = -E1000_ERR_CONFIG; + goto out; } - return 0; +out: + if (ret_val) + mutex_unlock(&nvm_mutex); + + return ret_val; } /** @@ -694,6 +799,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) } /** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. 
+ **/ +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + /* Wait for basic configuration completes before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + udelay(100); + } while ((!data) && --loop); + + /* + * If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. + */ + if (loop == 0) + hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** * e1000_phy_hw_reset_ich8lan - Performs a PHY reset * @hw: pointer to the HW structure * @@ -707,13 +844,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) u32 i; u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; s32 ret_val; - u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT; u16 word_addr, reg_data, reg_addr, phy_page = 0; ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) return ret_val; + /* Allow time for h/w to get to a quiescent state after reset */ + mdelay(10); + if (hw->mac.type == e1000_pchlan) { ret_val = e1000_hv_phy_workarounds_ich8lan(hw); if (ret_val) @@ -741,26 +880,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) if (!(data & sw_cfg_mask)) return 0; - /* Wait for basic configuration completes before proceeding*/ - do { - data = er32(STATUS); - data &= E1000_STATUS_LAN_INIT_DONE; - udelay(100); - } while ((!data) && --loop); - - /* - * If basic configuration is incomplete before the above loop - * count reaches 0, loading the configuration from NVM will - * leave the PHY in a bad state possibly resulting in no link. - */ - if (loop == 0) { - hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); - } - - /* Clear the Init Done bit for the next init event */ - data = er32(STATUS); - data &= ~E1000_STATUS_LAN_INIT_DONE; - ew32(STATUS, data); + /* Wait for basic configuration completes before proceeding */ + e1000_lan_init_done_ich8lan(hw); /* * Make sure HW does not configure LCD from PHY @@ -961,12 +1082,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; ew32(PHY_CTRL, phy_ctrl); + if (phy->type != e1000_phy_igp_3) + return 0; + /* * Call gig speed drop workaround on LPLU before accessing * any PHY registers */ - if ((hw->mac.type == e1000_ich8lan) && - (hw->phy.type == e1000_phy_igp_3)) + if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ @@ -979,6 +1102,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; ew32(PHY_CTRL, phy_ctrl); + if (phy->type != e1000_phy_igp_3) + return 0; + /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most @@ -1038,6 +1164,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) if (!active) { phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + /* * LPLU and SmartSpeed are mutually exclusive. 
LPLU is used * during Dx states where the power conservation is most @@ -1073,12 +1203,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; ew32(PHY_CTRL, phy_ctrl); + if (phy->type != e1000_phy_igp_3) + return 0; + /* * Call gig speed drop workaround on LPLU before accessing * any PHY registers */ - if ((hw->mac.type == e1000_ich8lan) && - (hw->phy.type == e1000_phy_igp_3)) + if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ @@ -1175,7 +1307,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 act_offset; - s32 ret_val; + s32 ret_val = 0; u32 bank = 0; u16 i, word; @@ -1190,12 +1322,15 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, goto out; ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); - if (ret_val) - goto release; + if (ret_val) { + hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } act_offset = (bank) ? nvm->flash_bank_size : 0; act_offset += offset; + ret_val = 0; for (i = 0; i < words; i++) { if ((dev_spec->shadow_ram) && (dev_spec->shadow_ram[offset+i].modified)) { @@ -1210,7 +1345,6 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } } -release: e1000_release_swflag_ich8lan(hw); out: @@ -1461,7 +1595,6 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - s32 ret_val; u16 i; if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || @@ -1470,17 +1603,11 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_NVM; } - ret_val = e1000_acquire_swflag_ich8lan(hw); - if (ret_val) - return ret_val; - for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset+i].modified = 1; dev_spec->shadow_ram[offset+i].value = data[i]; } - e1000_release_swflag_ich8lan(hw); - return 0; } @@ -1521,8 +1648,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { - e1000_release_swflag_ich8lan(hw); - goto out; + hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); + bank = 0; } if (bank == 0) { @@ -1905,19 +2032,15 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) break; case 1: sector_size = ICH_FLASH_SEG_SIZE_4K; - iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; + iteration = 1; break; case 2: - if (hw->mac.type == e1000_ich9lan) { - sector_size = ICH_FLASH_SEG_SIZE_8K; - iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; - } else { - return -E1000_ERR_NVM; - } + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; - iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; + iteration = 1; break; default: return -E1000_ERR_NVM; @@ -1925,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; - flash_linear_addr += (bank) ? (sector_size * iteration) : 0; + flash_linear_addr += (bank) ? 
flash_bank_size : 0; for (j = 0; j < iteration ; j++) { do { @@ -2143,6 +2266,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ctrl = er32(CTRL); if (!e1000_check_reset_block(hw)) { + /* Clear PHY Reset Asserted bit */ + if (hw->mac.type >= e1000_pchlan) { + u32 status = er32(STATUS); + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + } + /* * PHY HW reset requires MAC CORE reset at the same * time to make sure the interface between MAC and the @@ -2156,23 +2285,34 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(CTRL, (ctrl | E1000_CTRL_RST)); msleep(20); - if (!ret_val) { - /* release the swflag because it is not reset by - * hardware reset - */ + if (!ret_val) e1000_release_swflag_ich8lan(hw); - } - ret_val = e1000e_get_auto_rd_done(hw); - if (ret_val) { - /* - * When auto config read does not complete, do not - * return with an error. This can happen in situations - * where there is no eeprom and prevents getting link. - */ - hw_dbg(hw, "Auto Read Done did not complete\n"); + if (ctrl & E1000_CTRL_PHY_RST) + ret_val = hw->phy.ops.get_cfg_done(hw); + + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg(hw, "Auto Read Done did not complete\n"); + } } + /* + * For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + ew32(IMC, 0xffffffff); icr = er32(ICR); @@ -2222,6 +2362,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Reading the BM_WUC register will clear the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. + */ + if (hw->phy.type == e1000_phy_82578) { + hw->phy.ops.read_phy_reg(hw, BM_WUC, &i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + /* Setup link and flow control */ ret_val = e1000_setup_link_ich8lan(hw); @@ -2254,16 +2406,6 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) ew32(CTRL_EXT, ctrl_ext); /* - * The 82578 Rx buffer will stall if wakeup is enabled in host and - * the ME. Reading the BM_WUC register will clear the host wakeup bit. - * Reset the phy after disabling host wakeup to reset the Rx buffer. - */ - if (hw->phy.type == e1000_phy_82578) { - e1e_rphy(hw, BM_WUC, &i); - e1000e_phy_hw_reset_generic(hw); - } - - /* * Clear all of the statistics registers (clear on read). 
It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there @@ -2485,6 +2627,14 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, if (ret_val) return ret_val; + if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) { + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + E1000_KMRNCTRLSTA_K1_DISABLE); + if (ret_val) + return ret_val; + } + if ((hw->mac.type == e1000_ich8lan) && (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { @@ -2850,6 +3000,16 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) { u32 bank = 0; + if (hw->mac.type >= e1000_pchlan) { + u32 status = er32(STATUS); + + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + hw_dbg(hw, + "PHY Reset Asserted not set - needs delay\n"); + } + e1000e_get_cfg_done(hw); /* If EEPROM is not marked present, init the IGP 3 PHY manually */ @@ -2921,7 +3081,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) static struct e1000_mac_operations ich8_mac_ops = { .id_led_init = e1000e_id_led_init, .check_mng_mode = e1000_check_mng_mode_ich8lan, - .check_for_link = e1000e_check_for_copper_link, + .check_for_link = e1000_check_for_copper_link_ich8lan, /* cleanup_led dependent on mac type */ .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, .get_bus_info = e1000_get_bus_info_ich8lan, diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index be6d9e99037..99ba2b8a2a0 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -378,12 +378,6 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) mac->get_link_status = 0; - if (hw->phy.type == e1000_phy_82578) { - ret_val = e1000_link_stall_workaround_hv(hw); - if (ret_val) - return ret_val; - } - /* * Check if there was DownShift, must be checked * immediately after link-up diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 679885a122b..fa92a683aef 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -4538,8 +4538,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); - if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) && - !(hw->mac.ops.check_mng_mode(hw))) { + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { /* enable wakeup by the PHY */ retval = e1000_init_phy_wakeup(adapter, wufc); if (retval) @@ -4557,7 +4556,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = !!wufc; /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->flags & FLAG_MNG_PT_ENABLED) + if ((adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) *enable_wake = true; if (adapter->hw.phy.type == e1000_phy_igp_3) @@ -4670,14 +4670,6 @@ static int e1000_resume(struct pci_dev *pdev) return err; } - /* AER (Advanced Error Reporting) hooks */ - err = pci_enable_pcie_error_reporting(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " - "0x%x\n", err); - /* non-fatal, continue */ - } - pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); @@ -4785,6 +4777,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, netif_device_detach(netdev); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + if (netif_running(netdev)) e1000e_down(adapter); pci_disable_device(pdev); @@ -4987,6 +4982,14 @@ static int 
__devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_pci_reg; + /* AER (Advanced Error Reporting) hooks */ + err = pci_enable_pcie_error_reporting(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " + "0x%x\n", err); + /* non-fatal, continue */ + } + pci_set_master(pdev); /* PCI config space info */ err = pci_save_state(pdev); diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index e23459cf3d0..994401fd066 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -1531,7 +1531,12 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, */ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) - break; + /* + * If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + udelay(usec_interval); ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) break; @@ -2737,6 +2742,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) if (hw->phy.type != e1000_phy_82578) goto out; + /* Do not apply workaround if in PHY loopback bit 14 set */ + hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data); + if (data & PHY_CONTROL_LB) + goto out; + /* check if link is up and at 1Gbps */ ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); if (ret_val) diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index cc2ab6412c7..4f700348534 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1784,7 +1784,7 @@ int __init init_module(void) printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); } - for (i = 0; io[i] != -1 && i < MAX_EEPRO; i++) { + for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { dev = alloc_etherdev(sizeof(struct eepro_local)); if (!dev) break; diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 1686dca2874..1f016d66684 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev) outw(0x0000, ioaddr + 0x800c); outw(0x0000, ioaddr + 0x800e); - for (i = 0; i < (sizeof(start_code)); i+=32) { + for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { int j; outw(i, ioaddr + SM_PTR); - for (j = 0; j < 16; j+=2) + for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) outw(start_code[(i+j)/2], ioaddr+0x4000+j); - for (j = 0; j < 16; j+=2) + for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) outw(start_code[(i+j+16)/2], ioaddr+0x8000+j); } diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 78952f8324e..fa311a95099 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0101" +#define DRV_VERSION "EHEA_0102" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 147c4b088fb..977c3d35827 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) { int ret, i; + if (pr->qp) + netif_napi_del(&pr->napi); + ret = ehea_destroy_qp(pr->qp); if (!ret) { @@ -3080,7 +3083,9 @@ static const struct net_device_ops ehea_netdev_ops = { .ndo_poll_controller = ehea_netpoll, #endif .ndo_get_stats = ehea_get_stats, + .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = ehea_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, 
.ndo_set_multicast_list = ehea_set_multicast_list, .ndo_change_mtu = ehea_change_mtu, .ndo_vlan_rx_register = ehea_vlan_rx_register, diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index b60e27dfcfa..88d7ebf3122 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -338,8 +338,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, #ifndef MODULE static int printed_version; if (!printed_version++) - printk (KERN_INFO "%s" KERN_INFO "%s", - version, version2); + printk(KERN_INFO "%s%s", version, version2); #endif card_idx++; @@ -1600,7 +1599,7 @@ static int __init epic_init (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE - printk (KERN_INFO "%s" KERN_INFO "%s", + printk (KERN_INFO "%s%s", version, version2); #endif diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 891be28a7d4..160655d2458 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -584,7 +584,8 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, if (np->flags == HAS_MII_XCVR) { int phy, phy_idx = 0; - for (phy = 1; phy < 32 && phy_idx < 4; phy++) { + for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); + phy++) { int mii_status = mdio_read(dev, phy, 1); if (mii_status != 0xffff && mii_status != 0x0000) { @@ -1209,17 +1210,20 @@ static void fealnx_tx_timeout(struct net_device *dev) unsigned long flags; int i; - printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," - " resetting...\n", dev->name, ioread32(ioaddr + ISR)); + printk(KERN_WARNING + "%s: Transmit timed out, status %8.8x, resetting...\n", + dev->name, ioread32(ioaddr + ISR)); { printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) - printk(" %8.8x", (unsigned int) np->rx_ring[i].status); - printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring); + printk(KERN_CONT " %8.8x", + (unsigned int) np->rx_ring[i].status); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) - printk(" %4.4x", np->tx_ring[i].status); - printk("\n"); + printk(KERN_CONT " %4.4x", np->tx_ring[i].status); + printk(KERN_CONT "\n"); } spin_lock_irqsave(&np->lock, flags); diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0f19b743749..c9fd82d3a80 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -285,6 +285,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); struct bufdesc *bdp; + void *bufaddr; unsigned short status; unsigned long flags; @@ -312,7 +313,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) status &= ~BD_ENET_TX_STATS; /* Set buffer length and buffer pointer */ - bdp->cbd_bufaddr = __pa(skb->data); + bufaddr = skb->data; bdp->cbd_datlen = skb->len; /* @@ -320,11 +321,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) * 4-byte boundaries. Use bounce buffers to copy data * and get it aligned. Ugh. */ - if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { + if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { unsigned int index; index = bdp - fep->tx_bd_base; memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); - bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); + bufaddr = fep->tx_bounce[index]; } /* Save skb pointer */ @@ -336,7 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Push the data cache so the CPM does not get stale memory * data. 
*/ - bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, + bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); /* Send it on its way. Tell FEC it's ready, interrupt when done, @@ -1642,6 +1643,7 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 30b7dd67133..cc47f3f057c 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h @@ -46,12 +46,12 @@ #else -#define FEC_ECNTRL; 0x000 /* Ethernet control reg */ -#define FEC_IEVENT; 0x004 /* Interrupt even reg */ -#define FEC_IMASK; 0x008 /* Interrupt mask reg */ -#define FEC_IVEC; 0x00c /* Interrupt vec status reg */ -#define FEC_R_DES_ACTIVE; 0x010 /* Receive descriptor reg */ -#define FEC_X_DES_ACTIVE; 0x01c /* Transmit descriptor reg */ +#define FEC_ECNTRL 0x000 /* Ethernet control reg */ +#define FEC_IEVENT 0x004 /* Interrupt even reg */ +#define FEC_IMASK 0x008 /* Interrupt mask reg */ +#define FEC_IVEC 0x00c /* Interrupt vec status reg */ +#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ +#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ #define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */ #define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index cc786333d95..c40113f5896 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; + unsigned long flags; if (bcom_queue_full(priv->tx_dmatsk)) { if (net_ratelimit()) @@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - spin_lock_irq(&priv->lock); + spin_lock_irqsave(&priv->lock, flags); dev->trans_start = jiffies; bd = (struct bcom_fec_bd *) @@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); } - spin_unlock_irq(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_OK; } diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 1094d292630..3b4e0766c7b 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -3514,11 +3514,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data) nv_msi_workaround(np); #ifdef CONFIG_FORCEDETH_NAPI - napi_schedule(&np->napi); - - /* Disable furthur irq's - (msix not enabled with napi) */ - writel(0, base + NvRegIrqMask); + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } #else do @@ -3615,12 +3617,13 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) nv_msi_workaround(np); #ifdef CONFIG_FORCEDETH_NAPI - napi_schedule(&np->napi); - - /* Disable furthur irq's - (msix not enabled with napi) */ - writel(0, base + NvRegIrqMask); - + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } #else do { diff --git a/drivers/net/fs_enet/fs_enet-main.c 
b/drivers/net/fs_enet/fs_enet-main.c index b892c3ad9a7..2bc2d2b2064 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -754,17 +754,16 @@ static int fs_init_phy(struct net_device *dev) fep->oldlink = 0; fep->oldspeed = 0; fep->oldduplex = -1; - if(fep->fpi->phy_node) - phydev = of_phy_connect(dev, fep->fpi->phy_node, - &fs_adjust_link, 0, - PHY_INTERFACE_MODE_MII); - else { - printk("No phy bus ID specified in BSP code\n"); - return -EINVAL; + + phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!phydev) { + phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, + PHY_INTERFACE_MODE_MII); } - if (IS_ERR(phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(phydev); + if (!phydev) { + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; } fep->phydev = phydev; @@ -1005,6 +1004,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, goto out_free_fpi; } + SET_NETDEV_DEV(ndev, &ofdev->dev); dev_set_drvdata(&ofdev->dev, ndev); fep = netdev_priv(ndev); diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index 3af581303ca..d167090248e 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) } -#ifdef CONFIG_GIANFAR +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) { struct gfar __iomem *enet_regs; @@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) #endif -#ifdef CONFIG_UCC_GETH +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) { struct device_node *np = NULL; @@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, if (of_device_is_compatible(np, "fsl,gianfar-mdio") || of_device_is_compatible(np, "fsl,gianfar-tbi") || of_device_is_compatible(np, "gianfar")) { -#ifdef CONFIG_GIANFAR +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) tbipa = get_gfar_tbipa(regs); #else err = -ENODEV; @@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, #endif } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || of_device_is_compatible(np, "ucc_geth_phy")) { -#ifdef CONFIG_UCC_GETH +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) u32 id; static u32 mii_mng_master; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 4ae1d259fce..a00ec639c38 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -156,6 +156,8 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, .ndo_vlan_rx_register = gfar_vlan_rx_register, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, #endif @@ -262,15 +264,6 @@ static int gfar_of_init(struct net_device *dev) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!priv->phy_node) { - u32 *fixed_link; - - fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); - if (!fixed_link) { - err = -ENODEV; - goto err_out; - } - } /* Find the TBI PHY. 
If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); @@ -498,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev) dev_set_drvdata(&ofdev->dev, NULL); + unregister_netdev(priv->ndev); iounmap(priv->regs); free_netdev(priv->ndev); @@ -657,13 +651,14 @@ static int init_phy(struct net_device *dev) interface = gfar_get_interface(dev); - if (priv->phy_node) { - priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, - 0, interface); - if (!priv->phydev) { - dev_err(&dev->dev, "error: Could not attach to PHY\n"); - return -ENODEV; - } + priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!priv->phydev) + priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, + interface); + if (!priv->phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; + } if (interface == PHY_INTERFACE_MODE_SGMII) @@ -942,6 +937,7 @@ int startup_gfar(struct net_device *dev) struct gfar __iomem *regs = priv->regs; int err = 0; u32 rctrl = 0; + u32 tctrl = 0; u32 attrs = 0; gfar_write(&regs->imask, IMASK_INIT_CLEAR); @@ -1117,11 +1113,19 @@ rctrl |= RCTRL_PADDING(priv->padding); } + /* keep vlan related bits if it's enabled */ + if (priv->vlgrp) { + rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + tctrl |= TCTRL_VLINS; + } + /* Init rctrl based on our settings */ gfar_write(&priv->regs->rctrl, rctrl); if (dev->features & NETIF_F_IP_CSUM) - gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); + tctrl |= TCTRL_INIT_CSUM; + + gfar_write(&priv->regs->tctrl, tctrl); /* Set the extraction length and index */ attrs = ATTRELI_EL(priv->rx_stash_size) | @@ -1456,7 +1460,6 @@ static void gfar_vlan_rx_register(struct net_device *dev, /* Enable VLAN tag extraction */ tempval = gfar_read(&priv->regs->rctrl); - tempval |= RCTRL_VLEX; tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); gfar_write(&priv->regs->rctrl, tempval); } else { diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index dbf06e9313c..2234118eedb 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals return -EINVAL; } - priv->rxic = mk_ic_value( - gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), - cvals->rx_max_coalesced_frames); + priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); /* Set up tx coalescing */ if ((cvals->tx_coalesce_usecs == 0) || @@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals return -EINVAL; } - priv->txic = mk_ic_value( - gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), - cvals->tx_max_coalesced_frames); + priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); gfar_write(&priv->regs->rxic, 0); if (priv->rxcoalescing) diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 9d5b62cb30f..d62378cbc14 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -173,8 +173,8 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static const char version[] __devinitconst = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" -KERN_INFO " Some modifications by Eric kasten <kasten@nscl.msu.edu>\n" -KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n"; +" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n" +" 
Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n"; /* IP_MF appears to be only defined in <netinet/ip.h>, however, @@ -1080,11 +1080,14 @@ static void hamachi_tx_timeout(struct net_device *dev) { printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) - printk(" %8.8x", le32_to_cpu(hmp->rx_ring[i].status_n_length)); - printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring); + printk(KERN_CONT " %8.8x", + le32_to_cpu(hmp->rx_ring[i].status_n_length)); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) - printk(" %4.4x", le32_to_cpu(hmp->tx_ring[i].status_n_length)); - printk("\n"); + printk(KERN_CONT " %4.4x", + le32_to_cpu(hmp->tx_ring[i].status_n_length)); + printk(KERN_CONT "\n"); } /* Reinit the hardware and make sure the Rx and Tx processes @@ -1753,13 +1756,13 @@ static int hamachi_close(struct net_device *dev) #ifdef __i386__ if (hamachi_debug > 2) { - printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", + printk(KERN_DEBUG " Tx ring at %8.8x:\n", (int)hmp->tx_ring_dma); for (i = 0; i < TX_RING_SIZE; i++) - printk(" %c #%d desc. %8.8x %8.8x.\n", + printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n", readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ', i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr); - printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", + printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)hmp->rx_ring_dma); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n", @@ -1770,7 +1773,7 @@ static int hamachi_close(struct net_device *dev) u16 *addr = (u16 *) hmp->rx_skbuff[i]->data; int j; - + printk(KERN_DEBUG "Addr: "); for (j = 0; j < 0x50; j++) printk(" %4.4x", addr[j]); printk("\n"); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 155160052c8..981ab530e9a 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -3,7 +3,7 @@ * devices like TTY. It interfaces between a raw TTY and the * kernel's AX.25 protocol layers. 
* - * Authors: Andreas Könsgen <ajk@iehk.rwth-aachen.de> + * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de> * Ralf Baechle DL5RB <ralf@linux-mips.org> * * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 5e4b7afd068..352703255bb 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -68,7 +68,7 @@ static const char paranoia_str[] = KERN_ERR static const char bc_drvname[] = "baycom_epp"; static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" -KERN_INFO "baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; +"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 2e6fc4dc74b..5f5af9a606f 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -102,7 +102,7 @@ static const char bc_drvname[] = "baycom_par"; static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" -KERN_INFO "baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; +"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index b6a816e60c0..aa4488e871b 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -91,7 +91,7 @@ static const char bc_drvname[] = "baycom_ser_fdx"; static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" -KERN_INFO "baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; +"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index 3bcc57acbe6..88c59359602 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -79,7 +79,7 @@ static const char bc_drvname[] = "baycom_ser_hdx"; static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" -KERN_INFO "baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; +"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; /* --------------------------------------------------------------------- */ diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index beb84213b67..f0f89080371 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev) free_irq(dev->emac_irq, dev); + netif_carrier_off(ndev); + return 0; } diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index 1d5379de690..8d76cb89dbd 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -188,11 +188,12 @@ void rgmii_put_mdio(struct of_device *ofdev, int input) void rgmii_detach(struct of_device *ofdev, int input) { struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct rgmii_regs __iomem *p = dev->base; - - mutex_lock(&dev->lock); + struct rgmii_regs __iomem *p; BUG_ON(!dev || dev->users == 0); + p = dev->base; + + 
mutex_lock(&dev->lock); RGMII_DBG(dev, "detach(%d)" NL, input); diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index efd9be21488..ac28dd5a4fd 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -190,6 +190,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.write_reg = igb_write_phy_reg_igp; } + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ea17319624a..adb09d32625 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -127,14 +127,48 @@ static void igb_restore_vlan(struct igb_adapter *); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static int igb_rcv_msg_from_vf(struct igb_adapter *, u32); -static inline void igb_set_rah_pool(struct e1000_hw *, int , int); static void igb_set_mc_list_pools(struct igb_adapter *, int, u16); static void igb_vmm_control(struct igb_adapter *); -static inline void igb_set_vmolr(struct e1000_hw *, int); -static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int); static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn) +{ + u32 reg_data; + + reg_data = rd32(E1000_VMOLR(vfn)); + reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */ + E1000_VMOLR_ROPE | /* Accept packets matched in UTA */ + E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */ + E1000_VMOLR_AUPE | /* Accept untagged packets */ + E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + wr32(E1000_VMOLR(vfn), reg_data); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry) +{ + u32 reg_data; + + reg_data = rd32(E1000_RAH(entry)); + reg_data &= ~E1000_RAH_POOL_MASK; + reg_data |= E1000_RAH_POOL_1 << pool;; + wr32(E1000_RAH(entry), reg_data); +} + #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); @@ -4549,11 +4583,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, cleaned = true; cleaned_count++; + /* this is the fast path for the non-packet split case */ if (!adapter->rx_ps_hdr_size) { pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_buffer_len + - NET_IP_ALIGN, + adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; skb_put(skb, length); goto send_up; } @@ -4570,8 +4605,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, if (!skb_shinfo(skb)->nr_frags) { pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_ps_hdr_size + NET_IP_ALIGN, + adapter->rx_ps_hdr_size, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; skb_put(skb, hlen); } @@ -4713,7 +4749,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, bufsz = adapter->rx_ps_hdr_size; else bufsz = adapter->rx_buffer_len; - bufsz += NET_IP_ALIGN; while (cleaned_count--) { rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); @@ -4737,7 +4772,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring 
*rx_ring, } if (!buffer_info->skb) { - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; @@ -5338,6 +5373,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, netif_device_detach(netdev); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + if (netif_running(netdev)) igb_down(adapter); pci_disable_device(pdev); @@ -5414,43 +5452,6 @@ static void igb_io_resume(struct pci_dev *pdev) igb_get_hw_control(adapter); } -static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn) -{ - u32 reg_data; - - reg_data = rd32(E1000_VMOLR(vfn)); - reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */ - E1000_VMOLR_ROPE | /* Accept packets matched in UTA */ - E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */ - E1000_VMOLR_AUPE | /* Accept untagged packets */ - E1000_VMOLR_STRVLAN; /* Strip vlan tags */ - wr32(E1000_VMOLR(vfn), reg_data); -} - -static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, - int vfn) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vmolr; - - vmolr = rd32(E1000_VMOLR(vfn)); - vmolr &= ~E1000_VMOLR_RLPML_MASK; - vmolr |= size | E1000_VMOLR_LPE; - wr32(E1000_VMOLR(vfn), vmolr); - - return 0; -} - -static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry) -{ - u32 reg_data; - - reg_data = rd32(E1000_RAH(entry)); - reg_data &= ~E1000_RAH_POOL_MASK; - reg_data |= E1000_RAH_POOL_1 << pool;; - wr32(E1000_RAH(entry), reg_data); -} - static void igb_set_mc_list_pools(struct igb_adapter *adapter, int entry_count, u16 total_rar_filters) { diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 2a4faf9ade6..a9a61efa964 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c @@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) err = mbx->ops.read_posted(hw, msgbuf, 2); + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + /* if nacked the vlan was rejected */ if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) err = -E1000_ERR_MAC_INIT; @@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) if (!ret_val) ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index c4361d46659..ee1cff5c9b2 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -23,7 +23,6 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/interrupt.h> @@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = { .ndo_start_xmit = au1k_irda_hard_xmit, .ndo_tx_timeout = au1k_tx_timeout, .ndo_do_ioctl = au1k_irda_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, }; static int au1k_irda_net_init(struct net_device *dev) diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index f3eed6a8fba..911c082cee5 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size) return 0; } +static const struct net_device_ops bfin_sir_ndo = { + .ndo_open = bfin_sir_open, + 
.ndo_stop = bfin_sir_stop, + .ndo_start_xmit = bfin_sir_hard_xmit, + .ndo_do_ioctl = bfin_sir_ioctl, + .ndo_get_stats = bfin_sir_stats, +}; + static int __devinit bfin_sir_probe(struct platform_device *pdev) { struct net_device *dev; @@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev) if (err) goto err_mem_3; - dev->hard_start_xmit = bfin_sir_hard_xmit; - dev->open = bfin_sir_open; - dev->stop = bfin_sir_stop; - dev->do_ioctl = bfin_sir_ioctl; - dev->get_stats = bfin_sir_stats; - dev->irq = sir_port->irq; + dev->netdev_ops = &bfin_sir_ndo; + dev->irq = sir_port->irq; irda_init_max_qos_capabilies(&self->qos); diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index d53aa958213..20f9bc62668 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -31,7 +31,6 @@ #include <linux/tty.h> #include <linux/init.h> #include <asm/uaccess.h> -#include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/mutex.h> diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 3376a4f39e0..77d10edefd2 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = { .ndo_stop = pxa_irda_stop, .ndo_start_xmit = pxa_irda_hard_xmit, .ndo_do_ioctl = pxa_irda_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, }; static int pxa_irda_probe(struct platform_device *pdev) @@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev) if (!dev) goto err_mem_3; + SET_NETDEV_DEV(dev, &pdev->dev); si = netdev_priv(dev); si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 2aeb2e6aec1..b039cb081e9 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -24,7 +24,6 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/interrupt.h> @@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = { .ndo_stop = sa1100_irda_stop, .ndo_start_xmit = sa1100_irda_hard_xmit, .ndo_do_ioctl = sa1100_irda_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, }; static int sa1100_irda_probe(struct platform_device *pdev) diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index d0883835b0c..fe4f2b2bff9 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -115,7 +115,7 @@ static int __init w83977af_init(void) IRDA_DEBUG(0, "%s()\n", __func__ ); - for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) { + for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) { if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) return 0; } diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 73585fd8f29..d12377b8435 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c @@ -430,7 +430,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev) * hardware interrupt handler. Queue flow control is * thus managed under this lock as well. 
*/ - spin_lock_irq(&np->lock); + unsigned long flags; + spin_lock_irqsave(&np->lock, flags); add_to_tx_ring(np, skb, length); dev->trans_start = jiffies; @@ -446,7 +447,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev) * is when the transmit statistics are updated. */ - spin_unlock_irq(&np->lock); + spin_unlock_irqrestore(&np->lock, flags); #else /* This is the case for older hardware which takes * a single transmit buffer at a time, and it is diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index cd22323cfd2..2c4dc8221dc 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -96,6 +96,8 @@ #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 +#define IXGBE_MAX_RSC_INT_RATE 162760 + /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct ixgbe_tx_buffer { @@ -134,6 +136,8 @@ struct ixgbe_ring { u8 queue_index; /* needed for multiqueue queue management */ +#define IXGBE_RING_RX_PS_ENABLED (u8)(1) + u8 flags; /* per ring feature flags */ u16 head; u16 tail; @@ -327,6 +331,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) #define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) u32 flags2; diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index b9923047ce1..522c03bc1da 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); /** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pci_read_config_word(adapter->pdev, + IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + pci_write_config_word(adapter->pdev, + IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +/** * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count * @hw: pointer to hardware structure * @@ -153,6 +198,26 @@ out: } /** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. 
+ * Then set pcie completion timeout + **/ +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + + ret_val = ixgbe_start_hw_generic(hw); + + /* set the completion timeout for interface */ + if (ret_val == 0) + ixgbe_set_pcie_completion_timeout(hw); + + return ret_val; +} + +/** * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed @@ -1085,7 +1150,7 @@ out: static struct ixgbe_mac_operations mac_ops_82598 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82598, - .start_hw = &ixgbe_start_hw_generic, + .start_hw = &ixgbe_start_hw_82598, .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_82598, .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index d56890f5c9d..1c726573290 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -106,8 +106,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n"); - return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } @@ -116,8 +114,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) u8 err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n"); - if (state > 0) { /* Turn on DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) @@ -138,7 +134,23 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->hw.fc.requested_mode = ixgbe_fc_none; } adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; +#ifdef IXGBE_FCOE + /* Turn on FCoE offload */ + if ((adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) && + (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))) { + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = + IXGBE_FCRETA_SIZE; + netdev->features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FSO; + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + } +#endif /* IXGBE_FCOE */ ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -154,6 +166,20 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* Turn off FCoE offload */ + if (adapter->flags & (IXGBE_FLAG_FCOE_CAPABLE | + IXGBE_FLAG_FCOE_ENABLED)) { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + netdev->features &= ~NETIF_F_FCOE_CRC; + netdev->features &= ~NETIF_F_FSO; + netdev->fcoe_ddp_xid = 0; + } +#endif /* IXGBE_FCOE */ ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -169,6 +195,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); int i, j; + memset(perm_addr, 0xff, MAX_ADDR_LEN); + for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c 
b/drivers/net/ixgbe/ixgbe_ethtool.c index 86f4f3e36f2..dff8dfac7ed 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_EXTERNAL; if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->mac.type == ixgbe_mac_82599EB)) { + (hw->phy.multispeed_fiber)) { ecmd->supported |= (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg); @@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev, s32 err = 0; if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->mac.type == ixgbe_mac_82599EB)) { + (hw->phy.multispeed_fiber)) { /* 10000/copper and 1000/copper must autoneg * this function does not support any duplex forcing, but can * limit the advertising of the adapter to only 10000 or 1000 */ @@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev, } else { /* in this case we currently only support 10Gb/FULL */ if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; } @@ -1829,7 +1830,6 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, break; default: wol->supported = 0; - retval = 0; } return retval; @@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_q_vector *q_vector; int i; if (ec->tx_max_coalesced_frames_irq) @@ -1975,18 +1976,31 @@ static int ixgbe_set_coalesce(struct net_device *netdev, * any other value means disable eitr, which is best * served by setting the interrupt rate very high */ - adapter->eitr_param = IXGBE_MAX_INT_RATE; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE; + else + adapter->eitr_param = IXGBE_MAX_INT_RATE; adapter->itr_setting = 0; } - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - if (q_vector->txr_count && !q_vector->rxr_count) - /* tx vector gets half the rate */ - q_vector->eitr = (adapter->eitr_param >> 1); - else - /* rx only or mixed */ - q_vector->eitr = adapter->eitr_param; + /* MSI/MSIx Interrupt Mode */ + if (adapter->flags & + (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { + int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->txr_count && !q_vector->rxr_count) + /* tx vector gets half the rate */ + q_vector->eitr = (adapter->eitr_param >> 1); + else + /* rx only or mixed */ + q_vector->eitr = adapter->eitr_param; + ixgbe_write_eitr(q_vector); + } + /* Legacy Interrupt Mode */ + } else { + q_vector = adapter->q_vector[0]; + q_vector->eitr = adapter->eitr_param; ixgbe_write_eitr(q_vector); } @@ -1999,13 +2013,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) ethtool_op_set_flags(netdev, data); - if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) return 0; /* if state changes we need to update adapter->flags and reset */ if ((!!(data & ETH_FLAG_LRO)) != - (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { - adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; + (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { + adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; if (netif_running(netdev)) ixgbe_reinit_locked(adapter); else 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index fa9f24e2368..28cf104e36c 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, /* return 0 to bypass going to ULD for DDPed data */ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) rc = 0; - else + else if (ddp->len) rc = ddp->len; } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e756e220db3..77b0381a2b5 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -34,6 +34,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> +#include <linux/pkt_sched.h> #include <linux/ipv6.h> #include <net/checksum.h> #include <net/ip6_checksum.h> @@ -491,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, skb_record_rx_queue(skb, ring->queue_index); if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { - if (adapter->vlgrp && is_vlan && (tag != 0)) + if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) vlan_gro_receive(napi, adapter->vlgrp, tag, skb); else napi_gro_receive(napi, skb); } else { - if (adapter->vlgrp && is_vlan && (tag != 0)) + if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) vlan_hwaccel_rx(skb, adapter->vlgrp, tag); else netif_rx(skb); @@ -510,8 +511,11 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, * @skb: skb currently being received and modified **/ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, - u32 status_err, struct sk_buff *skb) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { + u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); + skb->ip_summed = CHECKSUM_NONE; /* Rx csum disabled */ @@ -529,6 +533,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, return; if (status_err & IXGBE_RXDADV_ERR_TCPE) { + u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. + */ + if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + (adapter->hw.mac.type == ixgbe_mac_82599EB)) + return; + adapter->hw_csum_rx_error++; return; } @@ -563,7 +577,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; unsigned int i; - unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; i = rx_ring->next_to_use; bi = &rx_ring->rx_buffer_info[i]; @@ -572,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); if (!bi->page_dma && - (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { + (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { @@ -593,7 +606,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, if (!bi->skb) { struct sk_buff *skb; - skb = netdev_alloc_skb(adapter->netdev, bufsz); + skb = netdev_alloc_skb(adapter->netdev, + (rx_ring->rx_buf_len + + NET_IP_ALIGN)); if (!skb) { adapter->alloc_rx_buff_failed++; @@ -608,12 +623,13 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, skb_reserve(skb, NET_IP_ALIGN); bi->skb = skb; - bi->dma = pci_map_single(pdev, skb->data, bufsz, + bi->dma = pci_map_single(pdev, skb->data, + rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. 
*/ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); } else { @@ -710,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, break; (*work_done)++; - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> IXGBE_RXDADV_HDRBUFLEN_SHIFT; @@ -732,6 +748,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, pci_unmap_single(pdev, rx_buffer_info->dma, rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); + rx_buffer_info->dma = 0; skb_put(skb, len); } @@ -763,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(next_rxd); cleaned_count++; - if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) rsc_count = ixgbe_get_rsc_count(rx_desc); if (rsc_count) { @@ -781,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_ring->stats.packets++; rx_ring->stats.bytes += skb->len; } else { - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { rx_buffer_info->skb = next_buffer->skb; rx_buffer_info->dma = next_buffer->dma; next_buffer->skb = skb; @@ -799,7 +816,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, goto next_desc; } - ixgbe_rx_checksum(adapter, staterr, skb); + ixgbe_rx_checksum(adapter, rx_desc, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1881,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 -static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) { - struct ixgbe_ring *rx_ring; u32 srrctl; - int queue0 = 0; - unsigned long mask; + int index; struct ixgbe_ring_feature *feature = adapter->ring_feature; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - int dcb_i = feature[RING_F_DCB].indices; - if (dcb_i == 8) - queue0 = index >> 4; - else if (dcb_i == 4) - queue0 = index >> 5; - else - dev_err(&adapter->pdev->dev, "Invalid DCB " - "configuration\n"); -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *f; - - rx_ring = &adapter->rx_ring[queue0]; - f = &adapter->ring_feature[RING_F_FCOE]; - if ((queue0 == 0) && (index > rx_ring->reg_idx)) - queue0 = f->mask + index - - rx_ring->reg_idx - 1; - } -#endif /* IXGBE_FCOE */ - } else { - queue0 = index; - } - } else { + index = rx_ring->reg_idx; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + unsigned long mask; mask = (unsigned long) feature[RING_F_RSS].mask; - queue0 = index & mask; index = index & mask; } - - rx_ring = &adapter->rx_ring[queue0]; - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; @@ -1929,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & IXGBE_SRRCTL_BSIZEHDR_MASK; - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #else @@ 
-1985,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) { u64 rdba; struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring *rx_ring; struct net_device *netdev = adapter->netdev; int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, j; @@ -2001,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) /* Decide whether to use packet split mode or not */ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; -#endif /* IXGBE_FCOE */ - /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { rx_buf_len = IXGBE_RX_HDR_SIZE; @@ -2019,7 +2005,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); } } else { - if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && (netdev->mtu <= ETH_DATA_LEN)) rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; else @@ -2053,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) { - rdba = adapter->rx_ring[i].dma; - j = adapter->rx_ring[i].reg_idx; + rx_ring = &adapter->rx_ring[i]; + rdba = rx_ring->dma; + j = rx_ring->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); - adapter->rx_ring[i].head = IXGBE_RDH(j); - adapter->rx_ring[i].tail = IXGBE_RDT(j); - adapter->rx_ring[i].rx_buf_len = rx_buf_len; + rx_ring->head = IXGBE_RDH(j); + rx_ring->tail = IXGBE_RDT(j); + rx_ring->rx_buf_len = rx_buf_len; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) + rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; - if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) && - (i >= f->mask) && (i < f->mask + f->indices)) - adapter->rx_ring[i].rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; + if ((i >= f->mask) && (i < f->mask + f->indices)) { + rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; + if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } } #endif /* IXGBE_FCOE */ - ixgbe_configure_srrctl(adapter, j); + ixgbe_configure_srrctl(adapter, rx_ring); } if (hw->mac.type == ixgbe_mac_82598EB) { @@ -2148,10 +2140,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); } - if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { /* Enable 82599 HW-RSC */ for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i].reg_idx; + rx_ring = &adapter->rx_ring[i]; + j = rx_ring->reg_idx; rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); rscctrl |= IXGBE_RSCCTL_RSCEN; /* @@ -2159,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) * total size of max desc * buf_len is not greater * than 65535 */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { #if (MAX_SKB_FRAGS > 16) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; #elif (MAX_SKB_FRAGS > 8) @@ -2694,16 +2687,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* * For hot-pluggable SFP+ devices, 
a new SFP+ module may have - * arrived before interrupts were enabled. We need to kick off - * the SFP+ module setup first, then try to bring up link. + * arrived before interrupts were enabled but after probe. Such + * devices wouldn't have their type identified yet. We need to + * kick off the SFP+ module setup first, then try to bring up link. * If we're not hot-pluggable SFP+, we just need to configure link * and bring it up. */ - err = hw->phy.ops.identify(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); - ixgbe_down(adapter); - return err; + if (hw->phy.type == ixgbe_phy_unknown) { + err = hw->phy.ops.identify(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + /* + * Take the device down and schedule the sfp tasklet + * which will unregister_netdev and log it. + */ + ixgbe_down(adapter); + schedule_work(&adapter->sfp_config_module_task); + return err; + } } if (ixgbe_is_sfp(hw)) { @@ -2812,9 +2812,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, } if (!rx_buffer_info->page) continue; - pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, - PCI_DMA_FROMDEVICE); - rx_buffer_info->page_dma = 0; + if (rx_buffer_info->page_dma) { + pci_unmap_page(pdev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); + rx_buffer_info->page_dma = 0; + } put_page(rx_buffer_info->page); rx_buffer_info->page = NULL; rx_buffer_info->page_offset = 0; @@ -3118,7 +3120,11 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) #endif if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n"); - ixgbe_set_rss_queues(adapter); + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + ixgbe_set_fdir_queues(adapter); + else + ixgbe_set_rss_queues(adapter); } /* adding FCoE rx rings to the end */ f->mask = adapter->num_rx_queues; @@ -3376,7 +3382,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) } #endif /* CONFIG_IXGBE_DCB */ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - ixgbe_cache_ring_rss(adapter); + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + ixgbe_cache_ring_fdir(adapter); + else + ixgbe_cache_ring_rss(adapter); + fcoe_i = f->mask; } for (i = 0; i < f->indices; i++, fcoe_i++) @@ -3716,14 +3727,15 @@ static void ixgbe_sfp_task(struct work_struct *work) if ((hw->phy.type == ixgbe_phy_nl) && (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { s32 ret = hw->phy.ops.identify_sfp(hw); - if (ret) + if (ret == IXGBE_ERR_SFP_NOT_PRESENT) goto reschedule; ret = hw->phy.ops.reset(hw); if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { - DPRINTK(PROBE, ERR, "failed to initialize because an " - "unsupported SFP+ module type was detected.\n" - "Reload the driver after installing a " - "supported module.\n"); + dev_err(&adapter->pdev->dev, "failed to initialize " + "because an unsupported SFP+ module type " + "was detected.\n" + "Reload the driver after installing a " + "supported module.\n"); unregister_netdev(adapter->netdev); } else { DPRINTK(PROBE, INFO, "detected SFP+: %d\n", @@ -3776,16 +3788,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; } else if (hw->mac.type == ixgbe_mac_82599EB) { adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; - adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; + 
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->ring_feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES; adapter->atr_sample_rate = 20; adapter->fdir_pballoc = 0; #ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; #endif /* IXGBE_FCOE */ } @@ -4502,7 +4515,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) u32 autoneg; adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; - if (hw->mac.ops.get_link_capabilities) + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) hw->mac.ops.get_link_capabilities(hw, &autoneg, &hw->mac.autoneg); if (hw->mac.ops.setup_link_speed) @@ -4524,10 +4538,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) u32 err; adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; + + /* Time for electrical oscillations to settle down */ + msleep(100); err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); - ixgbe_down(adapter); + dev_err(&adapter->pdev->dev, "failed to initialize because " + "an unsupported SFP+ module type was detected.\n" + "Reload the driver after installing a supported " + "module.\n"); + unregister_netdev(adapter->netdev); return; } hw->mac.ops.setup_sfp(hw); @@ -5095,9 +5116,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int count = 0; unsigned int f; - r_idx = skb->queue_mapping; - tx_ring = &adapter->tx_ring[r_idx]; - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { @@ -5107,11 +5125,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - tx_flags |= (skb->queue_mapping << 13); - tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_VLAN; + if (skb->priority != TC_PRIO_CONTROL) { + tx_flags |= (skb->queue_mapping << 13); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } else { + skb->queue_mapping = + adapter->ring_feature[RING_F_DCB].indices-1; + } } + r_idx = skb->queue_mapping; + tx_ring = &adapter->tx_ring[r_idx]; + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && (skb->protocol == htons(ETH_P_FCOE))) tx_flags |= IXGBE_TX_FLAGS_FCOE; @@ -5310,12 +5336,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) static void ixgbe_netpoll(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; - disable_irq(adapter->pdev->irq); adapter->flags |= IXGBE_FLAG_IN_NETPOLL; - ixgbe_intr(adapter->pdev->irq, netdev); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + for (i = 0; i < num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + ixgbe_msix_clean_many(0, q_vector); + } + } else { + ixgbe_intr(adapter->pdev->irq, netdev); + } adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; - enable_irq(adapter->pdev->irq); } #endif @@ -5513,8 +5546,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, round_jiffies(jiffies + (2 * HZ))); err = 0; } else 
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - dev_err(&adapter->pdev->dev, "failed to load because an " - "unsupported SFP+ module type was detected.\n"); + dev_err(&adapter->pdev->dev, "failed to initialize because " + "an unsupported SFP+ module type was detected.\n" + "Reload the driver after installing a supported " + "module.\n"); goto err_sw_init; } else if (err) { dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); @@ -5548,29 +5583,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif #ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { if (hw->mac.ops.get_device_caps) { hw->mac.ops.get_device_caps(hw, &device_caps); - if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { - netdev->features |= NETIF_F_FCOE_CRC; - netdev->features |= NETIF_F_FSO; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - DPRINTK(DRV, INFO, "FCoE enabled, " - "disabling Flow Director\n"); - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags &= - ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - adapter->atr_sample_rate = 0; - } else { - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - } + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } } #endif /* IXGBE_FCOE */ if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; /* make sure the EEPROM is good */ @@ -5612,7 +5636,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->wol = 0; break; } - device_init_wakeup(&adapter->pdev->dev, true); device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* pick up the PCI bus settings for reporting later */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 453e966762f..9ecad17522c 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -60,6 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + hw->phy.mdio.prtad = phy_addr; if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { ixgbe_get_phy_id(hw); hw->phy.type = @@ -68,6 +69,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) break; } } + /* clear value if nothing found */ + hw->phy.mdio.prtad = 0; } else { status = 0; } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fa87309dc08..be90eb4575f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -718,6 +718,12 @@ #define IXGBE_ECC_STATUS_82599 0x110E0 #define IXGBE_BAR_CTRL_82599 0x110F4 +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + /* Time Sync Registers */ #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ @@ -1521,6 +1527,7 @@ /* PCI Bus Info */ #define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 #define IXGBE_PCI_LINK_WIDTH 0x3F0 #define IXGBE_PCI_LINK_WIDTH_1 0x10 #define IXGBE_PCI_LINK_WIDTH_2 0x20 @@ -1531,6 +1538,7 @@ #define IXGBE_PCI_LINK_SPEED_5000 0x2 #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 /* Number of 100 microseconds we 
wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 2a0174b62e9..92fb8235c76 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) struct ixpdev_priv *ip = netdev_priv(dev); struct ixpdev_tx_desc *desc; int entry; + unsigned long flags; if (unlikely(skb->len > PAGE_SIZE)) { /* @@@ Count drops. */ @@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; - local_irq_disable(); + local_irq_save(flags); ip->tx_queue_entries++; if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) netif_stop_queue(dev); - local_irq_enable(); + local_irq_restore(flags); return 0; } diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index d12106b47bf..2f286091394 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -229,6 +229,7 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); netdev_boot_setup_check(dev); diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c new file mode 100644 index 00000000000..9a1dea60c1c --- /dev/null +++ b/drivers/net/ks8851.c @@ -0,0 +1,1322 @@ +/* drivers/net/ks8851.c + * + * Copyright 2009 Simtec Electronics + * http://www.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define DEBUG + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/cache.h> +#include <linux/crc32.h> +#include <linux/mii.h> + +#include <linux/spi/spi.h> + +#include "ks8851.h" + +/** + * struct ks8851_rxctrl - KS8851 driver rx control + * @mchash: Multicast hash-table data. + * @rxcr1: KS_RXCR1 register setting + * @rxcr2: KS_RXCR2 register setting + * + * Representation of the settings needed to control the receive filtering + * such as the multicast hash-filter and the receive register settings. This + * is used to make the job of working out if the receive settings change and + * then issuing the new settings to the worker that will send the necessary + * commands. + */ +struct ks8851_rxctrl { + u16 mchash[4]; + u16 rxcr1; + u16 rxcr2; +}; + +/** + * union ks8851_tx_hdr - tx header data + * @txb: The header as bytes + * @txw: The header as 16bit, little-endian words + * + * A dual representation of the tx header data to allow + * access to individual bytes, and to allow 16bit accesses + * with 16bit alignment. + */ +union ks8851_tx_hdr { + u8 txb[6]; + __le16 txw[3]; }; + +/** + * struct ks8851_net - KS8851 driver private data + * @netdev: The network device we're bound to + * @spidev: The spi device we're bound to. + * @lock: Lock to ensure that the device is not accessed when busy. + * @statelock: Lock on this structure for tx list. + * @mii: The MII state information for the mii calls. + * @rxctrl: RX settings for @rxctrl_work. + * @tx_work: Work queue for tx packets + * @irq_work: Work queue for servicing interrupts + * @rxctrl_work: Work queue for updating RX mode and multicast lists + * @txq: Queue of packets for transmission.
+ * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1. + * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2. + * @txh: Space for generating packet TX header in DMA-able data + * @rxd: Space for receiving SPI data, in DMA-able space. + * @txd: Space for transmitting SPI data, in DMA-able space. + * @msg_enable: The message flags controlling driver output (see ethtool). + * @fid: Incrementing frame id tag. + * @rc_ier: Cached copy of KS_IER. + * @rc_rxqcr: Cached copy of KS_RXQCR. + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not accessible until the transfer is finished and + * the DMA has been de-asserted. + * + * The @statelock is used to protect information in the structure which may + * need to be accessed via several sources, such as the network driver layer + * or one of the work queues. + * + * We align the buffers we may use for rx/tx to ensure that if the SPI driver + * wants to DMA map them, it will not have any problems with data the driver + * modifies. + */ +struct ks8851_net { + struct net_device *netdev; + struct spi_device *spidev; + struct mutex lock; + spinlock_t statelock; + + union ks8851_tx_hdr txh ____cacheline_aligned; + u8 rxd[8]; + u8 txd[8]; + + u32 msg_enable ____cacheline_aligned; + u16 tx_space; + u8 fid; + + u16 rc_ier; + u16 rc_rxqcr; + + struct mii_if_info mii; + struct ks8851_rxctrl rxctrl; + + struct work_struct tx_work; + struct work_struct irq_work; + struct work_struct rxctrl_work; + + struct sk_buff_head txq; + + struct spi_message spi_msg1; + struct spi_message spi_msg2; + struct spi_transfer spi_xfer1; + struct spi_transfer spi_xfer2[2]; +}; + +static int msg_enable; + +#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg) +#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg) +#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg) +#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg) + +/* shift for byte-enable data */ +#define BYTE_EN(_x) ((_x) << 2) + +/* turn register number and byte-enable mask into data for start of packet */ +#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6) + +/* SPI register read/write calls. + * + * All these calls issue SPI transactions to access the chip's registers. They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data (RX/TX FIFO accesses). + */ + +/** + * ks8851_wrreg16 - write 16bit register value to chip + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. + */ +static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) +{ + struct spi_transfer *xfer = &ks->spi_xfer1; + struct spi_message *msg = &ks->spi_msg1; + __le16 txb[2]; + int ret; + + txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR); + txb[1] = cpu_to_le16(val); + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 4; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + ks_err(ks, "spi_sync() failed\n"); +} + +/** + * ks8851_rx_1msg - select whether to use one or two messages for spi read + * @ks: The device structure + * + * Return whether to generate a single message with a tx and rx buffer + * supplied to spi_sync(), or alternatively send the tx and rx buffers + * as separate messages.
+ * + * Depending on the hardware in use, a single message may be more efficient + * on interrupts or work done by the driver. + * + * This currently always returns true until we add some per-device data passed + * from the platform code to specify which mode is better. + */ +static inline bool ks8851_rx_1msg(struct ks8851_net *ks) +{ + return true; +} + +/** + * ks8851_rdreg - issue read register command and return the data + * @ks: The device state + * @op: The register address and byte enables in message format. + * @rxb: The RX buffer to return the result into + * @rxl: The length of data expected. + * + * This is the low level read call that issues the necessary spi message(s) + * to read data from the register specified in @op. + */ +static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, + u8 *rxb, unsigned rxl) +{ + struct spi_transfer *xfer; + struct spi_message *msg; + __le16 *txb = (__le16 *)ks->txd; + u8 *trx = ks->rxd; + int ret; + + txb[0] = cpu_to_le16(op | KS_SPIOP_RD); + + if (ks8851_rx_1msg(ks)) { + msg = &ks->spi_msg1; + xfer = &ks->spi_xfer1; + + xfer->tx_buf = txb; + xfer->rx_buf = trx; + xfer->len = rxl + 2; + } else { + msg = &ks->spi_msg2; + xfer = ks->spi_xfer2; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 2; + + xfer++; + xfer->tx_buf = NULL; + xfer->rx_buf = trx; + xfer->len = rxl; + } + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + ks_err(ks, "read: spi_sync() failed\n"); + else if (ks8851_rx_1msg(ks)) + memcpy(rxb, trx + 2, rxl); + else + memcpy(rxb, trx, rxl); +} + +/** + * ks8851_rdreg8 - read 8 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 8bit register from the chip, returning the result +*/ +static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg) +{ + u8 rxb[1]; + + ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1); + return rxb[0]; +} + +/** + * ks8851_rdreg16 - read 16 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 16bit register from the chip, returning the result +*/ +static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg) +{ + __le16 rx = 0; + + ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2); + return le16_to_cpu(rx); +} + +/** + * ks8851_rdreg32 - read 32 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 32bit register from the chip. + * + * Note, this read requires the address be aligned to 4 bytes. +*/ +static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg) +{ + __le32 rx = 0; + + WARN_ON(reg & 3); + + ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4); + return le32_to_cpu(rx); +} + +/** + * ks8851_soft_reset - issue one of the soft reset to the device + * @ks: The device state. + * @op: The bit(s) to set in the GRR + * + * Issue the relevant soft-reset command to the device's GRR register + * specified by @op. + * + * Note, the delays are in there as a caution to ensure that the reset + * has time to take effect and then complete. Since the datasheet does + * not currently specify the exact sequence, we have chosen something + * that seems to work with our device. 
+ */ +static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) +{ + ks8851_wrreg16(ks, KS_GRR, op); + mdelay(1); /* wait a short time to effect reset */ + ks8851_wrreg16(ks, KS_GRR, 0); + mdelay(1); /* wait for condition to clear */ +} + +/** + * ks8851_write_mac_addr - write mac address to device registers + * @dev: The network device + * + * Update the KS8851 MAC address registers from the address in @dev. + * + * This call assumes that the chip is not running, so there is no need to + * shutdown the RXQ process whilst setting this. +*/ +static int ks8851_write_mac_addr(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + u16 *mcp = (u16 *)dev->dev_addr; + + mutex_lock(&ks->lock); + + ks8851_wrreg16(ks, KS_MARL, mcp[0]); + ks8851_wrreg16(ks, KS_MARM, mcp[1]); + ks8851_wrreg16(ks, KS_MARH, mcp[2]); + + mutex_unlock(&ks->lock); + + return 0; +} + +/** + * ks8851_init_mac - initialise the mac address + * @ks: The device structure + * + * Get or create the initial mac address for the device and then set that + * into the station address register. Currently we assume that the device + * does not have a valid mac address in it, and so we use random_ether_addr() + * to create a new one. + * + * In future, the driver should check to see if the device has an EEPROM + * attached and whether that has a valid ethernet address in it. + */ +static void ks8851_init_mac(struct ks8851_net *ks) +{ + struct net_device *dev = ks->netdev; + + random_ether_addr(dev->dev_addr); + ks8851_write_mac_addr(dev); +} + +/** + * ks8851_irq - device interrupt handler + * @irq: Interrupt number passed from the IRQ handler. + * @pw: The private word passed to request_irq(), our struct ks8851_net. + * + * Disable the interrupt from happening again until we've processed the + * current status by scheduling ks8851_irq_work(). + */ +static irqreturn_t ks8851_irq(int irq, void *pw) +{ + struct ks8851_net *ks = pw; + + disable_irq_nosync(irq); + schedule_work(&ks->irq_work); + return IRQ_HANDLED; +} + +/** + * ks8851_rdfifo - read data from the receive fifo + * @ks: The device state. + * @buff: The buffer address + * @len: The length of the data to read + * + * Issue an RXQ FIFO read command and read the @len amount of data from + * the FIFO into the buffer specified by @buff. + */ +static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) +{ + struct spi_transfer *xfer = ks->spi_xfer2; + struct spi_message *msg = &ks->spi_msg2; + u8 txb[1]; + int ret; + + if (netif_msg_rx_status(ks)) + ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff); + + /* set the operation we're issuing */ + txb[0] = KS_SPIOP_RXFIFO; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 1; + + xfer++; + xfer->rx_buf = buff; + xfer->tx_buf = NULL; + xfer->len = len; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + ks_err(ks, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_dbg_dumpkkt - dump initial packet contents to debug + * @ks: The device state + * @rxpkt: The data for the received packet + * + * Dump the initial data from the packet to dev_dbg(). +*/ +static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) +{ + ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", + rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], + rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], + rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); +} + +/** + * ks8851_rx_pkts - receive packets from the host + * @ks: The device information.
+ * + * This is called from the IRQ work queue when the system detects that there + * are packets in the receive queue. Find out how many packets there are and + * read them from the FIFO. + */ +static void ks8851_rx_pkts(struct ks8851_net *ks) +{ + struct sk_buff *skb; + unsigned rxfc; + unsigned rxlen; + unsigned rxstat; + u32 rxh; + u8 *rxpkt; + + rxfc = ks8851_rdreg8(ks, KS_RXFC); + + if (netif_msg_rx_status(ks)) + ks_dbg(ks, "%s: %d packets\n", __func__, rxfc); + + /* Currently we're issuing a read per packet, but we could possibly + * improve the code by issuing a single read, getting the receive + * header, allocating the packet and then reading the packet data + * out in one go. + * + * This form of operation would require us to hold the SPI bus' + * chipselect low during the entire transaction to avoid any + * reset to the data stream coming from the chip. + */ + + for (; rxfc != 0; rxfc--) { + rxh = ks8851_rdreg32(ks, KS_RXFHSR); + rxstat = rxh & 0xffff; + rxlen = rxh >> 16; + + if (netif_msg_rx_status(ks)) + ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n", + rxstat, rxlen); + + /* the length of the packet includes the 32bit CRC */ + + /* set dma read address */ + ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); + + /* start the packet dma process, and set auto-dequeue rx */ + ks8851_wrreg16(ks, KS_RXQCR, + ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); + + if (rxlen > 0) { + skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); + if (!skb) { + /* todo - dump frame and move on */ + } + + /* two bytes to ensure ip is aligned, and four bytes + * for the status header and 4 bytes of garbage */ + skb_reserve(skb, 2 + 4 + 4); + + rxpkt = skb_put(skb, rxlen - 4) - 8; + + /* align the packet length to 4 bytes, and add 4 bytes + * as we're getting the rx status header as well */ + ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8); + + if (netif_msg_pktdata(ks)) + ks8851_dbg_dumpkkt(ks, rxpkt); + + skb->protocol = eth_type_trans(skb, ks->netdev); + netif_rx(skb); + + ks->netdev->stats.rx_packets++; + ks->netdev->stats.rx_bytes += rxlen - 4; + } + + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + } +} + +/** + * ks8851_irq_work - work queue handler for dealing with interrupt requests + * @work: The work structure that was scheduled by schedule_work() + * + * This is the handler invoked when ks8851_irq() is called to find out + * what happened, as we cannot allow ourselves to sleep in the interrupt + * handler whilst waiting for the chip's lock, which another process may hold. + * + * Read the interrupt status, work out what needs to be done and then clear + * any of the interrupts that are not needed.
+ */ +static void ks8851_irq_work(struct work_struct *work) +{ + struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work); + unsigned status; + unsigned handled = 0; + + mutex_lock(&ks->lock); + + status = ks8851_rdreg16(ks, KS_ISR); + + if (netif_msg_intr(ks)) + dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n", + __func__, status); + + if (status & IRQ_LCI) { + /* should do something about checking link status */ + handled |= IRQ_LCI; + } + + if (status & IRQ_LDI) { + u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_WKEVT_MASK; + ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); + + handled |= IRQ_LDI; + } + + if (status & IRQ_RXPSI) + handled |= IRQ_RXPSI; + + if (status & IRQ_TXI) { + handled |= IRQ_TXI; + + /* no lock here, tx queue should have been stopped */ + + /* update our idea of how much tx space is available to the + * system */ + ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + + if (netif_msg_intr(ks)) + ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space); + } + + if (status & IRQ_RXI) + handled |= IRQ_RXI; + + if (status & IRQ_SPIBEI) { + dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__); + handled |= IRQ_SPIBEI; + } + + ks8851_wrreg16(ks, KS_ISR, handled); + + if (status & IRQ_RXI) { + /* the datasheet says to disable the rx interrupt during + * packet read-out, however we're masking the interrupt + * from the device so do not bother masking just the RX + * from the device. */ + + ks8851_rx_pkts(ks); + } + + /* if something stopped the rx process, probably due to wanting + * to change the rx settings, then do something about restarting + * it. */ + if (status & IRQ_RXPSI) { + struct ks8851_rxctrl *rxc = &ks->rxctrl; + + /* update the multicast hash table */ + ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]); + ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]); + ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]); + ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]); + + ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2); + ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1); + } + + mutex_unlock(&ks->lock); + + if (status & IRQ_TXI) + netif_wake_queue(ks->netdev); + + enable_irq(ks->netdev->irq); +} + +/** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. + */ +static inline unsigned calc_txlen(unsigned len) +{ + return ALIGN(len + 4, 4); +} + +/** + * ks8851_wrpkt - write packet to TX FIFO + * @ks: The device state. + * @txp: The sk_buff to transmit. + * @irq: IRQ on completion of the packet. + * + * Send the @txp to the chip. This means creating the relevant packet header + * specifying the length of the packet and the other information the chip + * needs, such as IRQ on completion. Send the header and the packet data to + * the device.
+ */ +static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) +{ + struct spi_transfer *xfer = ks->spi_xfer2; + struct spi_message *msg = &ks->spi_msg2; + unsigned fid = 0; + int ret; + + if (netif_msg_tx_queued(ks)) + dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n", + __func__, txp, txp->len, txp->data, irq); + + fid = ks->fid++; + fid &= TXFR_TXFID_MASK; + + if (irq) + fid |= TXFR_TXIC; /* irq on completion */ + + /* start header at txb[1] to align txw entries */ + ks->txh.txb[1] = KS_SPIOP_TXFIFO; + ks->txh.txw[1] = cpu_to_le16(fid); + ks->txh.txw[2] = cpu_to_le16(txp->len); + + xfer->tx_buf = &ks->txh.txb[1]; + xfer->rx_buf = NULL; + xfer->len = 5; + + xfer++; + xfer->tx_buf = txp->data; + xfer->rx_buf = NULL; + xfer->len = ALIGN(txp->len, 4); + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + ks_err(ks, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_done_tx - update and then free skbuff after transmitting + * @ks: The device state + * @txb: The buffer transmitted + */ +static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb) +{ + struct net_device *dev = ks->netdev; + + dev->stats.tx_bytes += txb->len; + dev->stats.tx_packets++; + + dev_kfree_skb(txb); +} + +/** + * ks8851_tx_work - process tx packet(s) + * @work: The work structure that was scheduled. + * + * This is called when a number of packets have been scheduled for + * transmission and need to be sent to the device. + */ +static void ks8851_tx_work(struct work_struct *work) +{ + struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work); + struct sk_buff *txb; + bool last = false; + + mutex_lock(&ks->lock); + + while (!last) { + txb = skb_dequeue(&ks->txq); + last = skb_queue_empty(&ks->txq); + + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + ks8851_wrpkt(ks, txb, last); + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); + + ks8851_done_tx(ks, txb); + } + + mutex_unlock(&ks->lock); +} + +/** + * ks8851_set_powermode - set power mode of the device + * @ks: The device state + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. + */ +static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + if (netif_msg_hw(ks)) + ks_dbg(ks, "setting power mode %d\n", pwrmode); + + pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks8851_wrreg16(ks, KS_PMECR, pmecr); +} + +/** + * ks8851_net_open - open network device + * @dev: The network device being opened. + * + * Called when the network device is marked active, such as a user executing + * 'ifconfig up' on the device. + */ +static int ks8851_net_open(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + /* lock the card, even if we may not actually be doing anything + * else at the moment */ + mutex_lock(&ks->lock); + + if (netif_msg_ifup(ks)) + ks_dbg(ks, "opening %s\n", dev->name); + + /* bring chip out of any power saving mode it was in */ + ks8851_set_powermode(ks, PMECR_PM_NORMAL); + + /* issue a soft reset to the RX/TX QMU to put it into a known + * state.
*/ + ks8851_soft_reset(ks, GRR_QMU); + + /* setup transmission parameters */ + + ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */ + TXCR_TXPE | /* pad to min length */ + TXCR_TXCRC | /* add CRC */ + TXCR_TXFCE)); /* enable flow control */ + + /* auto-increment tx data, reset tx pointer */ + ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); + + /* setup receiver control */ + + ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */ + RXCR1_RXFCE | /* enable flow control */ + RXCR1_RXBE | /* broadcast enable */ + RXCR1_RXUE | /* unicast enable */ + RXCR1_RXE)); /* enable rx block */ + + /* transfer entire frames out in one go */ + ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME); + + /* set receive counter timeouts */ + ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */ + ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */ + ks8851_wrreg16(ks, KS_RXFCTR, 10); /* 10 frames to IRQ */ + + ks->rc_rxqcr = (RXQCR_RXFCTE | /* IRQ on frame count exceeded */ + RXQCR_RXDBCTE | /* IRQ on byte count exceeded */ + RXQCR_RXDTTE); /* IRQ on time exceeded */ + + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + + /* clear then enable interrupts */ + +#define STD_IRQ (IRQ_LCI | /* Link Change */ \ + IRQ_TXI | /* TX done */ \ + IRQ_RXI | /* RX done */ \ + IRQ_SPIBEI | /* SPI bus error */ \ + IRQ_TXPSI | /* TX process stop */ \ + IRQ_RXPSI) /* RX process stop */ + + ks->rc_ier = STD_IRQ; + ks8851_wrreg16(ks, KS_ISR, STD_IRQ); + ks8851_wrreg16(ks, KS_IER, STD_IRQ); + + netif_start_queue(ks->netdev); + + if (netif_msg_ifup(ks)) + ks_dbg(ks, "network device %s up\n", dev->name); + + mutex_unlock(&ks->lock); + return 0; +} + +/** + * ks8851_net_stop - close network device + * @dev: The device being closed. + * + * Called to close down a network device which has been active. Cancel any + * work, shutdown the RX and TX process and then place the chip into a low + * power state whilst it is not being used. + */ +static int ks8851_net_stop(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + if (netif_msg_ifdown(ks)) + ks_info(ks, "%s: shutting down\n", dev->name); + + netif_stop_queue(dev); + + mutex_lock(&ks->lock); + + /* stop any outstanding work */ + flush_work(&ks->irq_work); + flush_work(&ks->tx_work); + flush_work(&ks->rxctrl_work); + + /* turn off the IRQs and ack any outstanding */ + ks8851_wrreg16(ks, KS_IER, 0x0000); + ks8851_wrreg16(ks, KS_ISR, 0xffff); + + /* shutdown RX process */ + ks8851_wrreg16(ks, KS_RXCR1, 0x0000); + + /* shutdown TX process */ + ks8851_wrreg16(ks, KS_TXCR, 0x0000); + + /* set powermode to soft power down to save power */ + ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); + + /* ensure any queued tx buffers are dumped */ + while (!skb_queue_empty(&ks->txq)) { + struct sk_buff *txb = skb_dequeue(&ks->txq); + + if (netif_msg_ifdown(ks)) + ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb); + + dev_kfree_skb(txb); + } + + mutex_unlock(&ks->lock); + return 0; +} + +/** + * ks8851_start_xmit - transmit packet + * @skb: The buffer to transmit + * @dev: The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. Queue the packet for + * the device and schedule the necessary work to transmit the packet when + * it is free. + * + * We do this to firstly avoid sleeping with the network device locked, + * and secondly so we can round up more than one packet to transmit which + * means we can try and avoid generating too many transmit done interrupts.
+ */ +static int ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + unsigned needed = calc_txlen(skb->len); + int ret = NETDEV_TX_OK; + + if (netif_msg_tx_queued(ks)) + ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__, + skb, skb->len, skb->data); + + spin_lock(&ks->statelock); + + if (needed > ks->tx_space) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + } else { + ks->tx_space -= needed; + skb_queue_tail(&ks->txq, skb); + } + + spin_unlock(&ks->statelock); + schedule_work(&ks->tx_work); + + return ret; +} + +/** + * ks8851_rxctrl_work - work handler to change rx mode + * @work: The work structure this belongs to. + * + * Lock the device and issue the necessary changes to the receive mode from + * the network device layer. This is done so that we can do this without + * having to sleep whilst holding the network device lock. + * + * Since the recommendation from Micrel is that the RXQ is shutdown whilst the + * receive parameters are programmed, we issue a write to disable the RXQ and + * then wait for the interrupt handler to be triggered once the RXQ shutdown is + * complete. The interrupt handler then writes the new values into the chip. + */ +static void ks8851_rxctrl_work(struct work_struct *work) +{ + struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work); + + mutex_lock(&ks->lock); + + /* need to shutdown RXQ before modifying filter parameters */ + ks8851_wrreg16(ks, KS_RXCR1, 0x00); + + mutex_unlock(&ks->lock); +} + +static void ks8851_set_rx_mode(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + struct ks8851_rxctrl rxctrl; + + memset(&rxctrl, 0, sizeof(rxctrl)); + + if (dev->flags & IFF_PROMISC) { + /* interface to receive everything */ + + rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF; + } else if (dev->flags & IFF_ALLMULTI) { + /* accept all multicast packets */ + + rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | + RXCR1_RXPAFMA | RXCR1_RXMAFMA); + } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) { + struct dev_mc_list *mcptr = dev->mc_list; + u32 crc; + int i; + + /* accept some multicast */ + + for (i = dev->mc_count; i > 0; i--) { + crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); + crc >>= (32 - 6); /* get top six bits */ + + rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); + mcptr = mcptr->next; + } + + rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; + } else { + /* just accept broadcast / unicast */ + rxctrl.rxcr1 = RXCR1_RXPAFMA; + } + + rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */ + RXCR1_RXBE | /* broadcast enable */ + RXCR1_RXE | /* RX process enable */ + RXCR1_RXFCE); /* enable flow control */ + + rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME; + + /* schedule work to do the actual set of the data if needed */ + + spin_lock(&ks->statelock); + + if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { + memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); + schedule_work(&ks->rxctrl_work); + } + + spin_unlock(&ks->statelock); +} + +static int ks8851_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + if (netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + return ks8851_write_mac_addr(dev); +} + +static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); +} + 
+static const struct net_device_ops ks8851_netdev_ops = { + .ndo_open = ks8851_net_open, + .ndo_stop = ks8851_net_stop, + .ndo_do_ioctl = ks8851_net_ioctl, + .ndo_start_xmit = ks8851_start_xmit, + .ndo_set_mac_address = ks8851_set_mac_address, + .ndo_set_rx_mode = ks8851_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + +static void ks8851_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *di) +{ + strlcpy(di->driver, "KS8851", sizeof(di->driver)); + strlcpy(di->version, "1.00", sizeof(di->version)); + strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info)); +} + +static u32 ks8851_get_msglevel(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return ks->msg_enable; +} + +static void ks8851_set_msglevel(struct net_device *dev, u32 to) +{ + struct ks8851_net *ks = netdev_priv(dev); + ks->msg_enable = to; +} + +static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_ethtool_gset(&ks->mii, cmd); +} + +static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_ethtool_sset(&ks->mii, cmd); +} + +static u32 ks8851_get_link(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_link_ok(&ks->mii); +} + +static int ks8851_nway_reset(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_nway_restart(&ks->mii); +} + +static const struct ethtool_ops ks8851_ethtool_ops = { + .get_drvinfo = ks8851_get_drvinfo, + .get_msglevel = ks8851_get_msglevel, + .set_msglevel = ks8851_set_msglevel, + .get_settings = ks8851_get_settings, + .set_settings = ks8851_set_settings, + .get_link = ks8851_get_link, + .nway_reset = ks8851_nway_reset, +}; + +/* MII interface controls */ + +/** + * ks8851_phy_reg - convert MII register into a KS8851 register + * @reg: MII register number. + * + * Return the KS8851 register number for the corresponding MII PHY register + * if possible. Return zero if the MII register has no direct mapping to the + * KS8851 register set. + */ +static int ks8851_phy_reg(int reg) +{ + switch (reg) { + case MII_BMCR: + return KS_P1MBCR; + case MII_BMSR: + return KS_P1MBSR; + case MII_PHYSID1: + return KS_PHY1ILR; + case MII_PHYSID2: + return KS_PHY1IHR; + case MII_ADVERTISE: + return KS_P1ANAR; + case MII_LPA: + return KS_P1ANLPR; + } + + return 0x0; +} + +/** + * ks8851_phy_read - MII interface PHY register read. + * @dev: The network device the PHY is on. + * @phy_addr: Address of PHY (ignored as we only have one) + * @reg: The register to read. + * + * This call reads data from the PHY register specified in @reg. Since the + * device does not support all the MII registers, the non-existent values + * are always returned as zero. + * + * We return zero for unsupported registers as the MII code does not check + * the value returned for any error status, and simply returns it to the + * caller. The mii-tool that the driver was tested with takes any -ve error + * as real PHY capabilities, thus displaying incorrect data to the user.
+ */ +static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg) +{ + struct ks8851_net *ks = netdev_priv(dev); + int ksreg; + int result; + + ksreg = ks8851_phy_reg(reg); + if (!ksreg) + return 0x0; /* no error return allowed, so use zero */ + + mutex_lock(&ks->lock); + result = ks8851_rdreg16(ks, ksreg); + mutex_unlock(&ks->lock); + + return result; +} + +static void ks8851_phy_write(struct net_device *dev, + int phy, int reg, int value) +{ + struct ks8851_net *ks = netdev_priv(dev); + int ksreg; + + ksreg = ks8851_phy_reg(reg); + if (ksreg) { + mutex_lock(&ks->lock); + ks8851_wrreg16(ks, ksreg, value); + mutex_unlock(&ks->lock); + } +} + +/** + * ks8851_read_selftest - read the selftest memory info. + * @ks: The device state + * + * Read and check the TX/RX memory selftest information. + */ +static int ks8851_read_selftest(struct ks8851_net *ks) +{ + unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; + int ret = 0; + unsigned rd; + + rd = ks8851_rdreg16(ks, KS_MBIR); + + if ((rd & both_done) != both_done) { + ks_warn(ks, "Memory selftest not finished\n"); + return 0; + } + + if (rd & MBIR_TXMBFA) { + ks_err(ks, "TX memory selftest fail\n"); + ret |= 1; + } + + if (rd & MBIR_RXMBFA) { + ks_err(ks, "RX memory selftest fail\n"); + ret |= 2; + } + + return 0; +} + +/* driver bus management functions */ + +static int __devinit ks8851_probe(struct spi_device *spi) +{ + struct net_device *ndev; + struct ks8851_net *ks; + int ret; + + ndev = alloc_etherdev(sizeof(struct ks8851_net)); + if (!ndev) { + dev_err(&spi->dev, "failed to alloc ethernet device\n"); + return -ENOMEM; + } + + spi->bits_per_word = 8; + + ks = netdev_priv(ndev); + + ks->netdev = ndev; + ks->spidev = spi; + ks->tx_space = 6144; + + mutex_init(&ks->lock); + spin_lock_init(&ks->statelock); + + INIT_WORK(&ks->tx_work, ks8851_tx_work); + INIT_WORK(&ks->irq_work, ks8851_irq_work); + INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work); + + /* initialise pre-made spi transfer messages */ + + spi_message_init(&ks->spi_msg1); + spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1); + + spi_message_init(&ks->spi_msg2); + spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); + spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); + + /* setup mii state */ + ks->mii.dev = ndev; + ks->mii.phy_id = 1, + ks->mii.phy_id_mask = 1; + ks->mii.reg_num_mask = 0xf; + ks->mii.mdio_read = ks8851_phy_read; + ks->mii.mdio_write = ks8851_phy_write; + + dev_info(&spi->dev, "message enable is %d\n", msg_enable); + + /* set the default message enable */ + ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK)); + + skb_queue_head_init(&ks->txq); + + SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops); + SET_NETDEV_DEV(ndev, &spi->dev); + + dev_set_drvdata(&spi->dev, ks); + + ndev->if_port = IF_PORT_100BASET; + ndev->netdev_ops = &ks8851_netdev_ops; + ndev->irq = spi->irq; + + /* simple check for a valid chip being connected to the bus */ + + if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { + dev_err(&spi->dev, "failed to read device ID\n"); + ret = -ENODEV; + goto err_id; + } + + ks8851_read_selftest(ks); + ks8851_init_mac(ks); + + ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW, + ndev->name, ks); + if (ret < 0) { + dev_err(&spi->dev, "failed to get irq\n"); + goto err_irq; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&spi->dev, "failed to register network device\n"); + goto err_netdev; + } + + dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n", + 
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), + ndev->dev_addr, ndev->irq); + + return 0; + + +err_netdev: + free_irq(ndev->irq, ndev); + +err_id: +err_irq: + free_netdev(ndev); + return ret; +} + +static int __devexit ks8851_remove(struct spi_device *spi) +{ + struct ks8851_net *priv = dev_get_drvdata(&spi->dev); + + if (netif_msg_drv(priv)) + dev_info(&spi->dev, "remove"); + + unregister_netdev(priv->netdev); + free_irq(spi->irq, priv); + free_netdev(priv->netdev); + + return 0; +} + +static struct spi_driver ks8851_driver = { + .driver = { + .name = "ks8851", + .owner = THIS_MODULE, + }, + .probe = ks8851_probe, + .remove = __devexit_p(ks8851_remove), +}; + +static int __init ks8851_init(void) +{ + return spi_register_driver(&ks8851_driver); +} + +static void __exit ks8851_exit(void) +{ + spi_unregister_driver(&ks8851_driver); +} + +module_init(ks8851_init); +module_exit(ks8851_exit); + +MODULE_DESCRIPTION("KS8851 Network driver"); +MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); +MODULE_LICENSE("GPL"); + +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h new file mode 100644 index 00000000000..85abe147afb --- /dev/null +++ b/drivers/net/ks8851.h @@ -0,0 +1,296 @@ +/* drivers/net/ks8851.h + * + * Copyright 2009 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * KS8851 register definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#define KS_CCR 0x08 +#define CCR_EEPROM (1 << 9) +#define CCR_SPI (1 << 8) +#define CCR_32PIN (1 << 0) + +/* MAC address registers */ +#define KS_MARL 0x10 +#define KS_MARM 0x12 +#define KS_MARH 0x14 + +#define KS_OBCR 0x20 +#define OBCR_ODS_16mA (1 << 6) + +#define KS_EEPCR 0x22 +#define EEPCR_EESA (1 << 4) +#define EEPCR_EESB (1 << 3) +#define EEPCR_EEDO (1 << 2) +#define EEPCR_EESCK (1 << 1) +#define EEPCR_EECS (1 << 0) + +#define KS_MBIR 0x24 +#define MBIR_TXMBF (1 << 12) +#define MBIR_TXMBFA (1 << 11) +#define MBIR_RXMBF (1 << 4) +#define MBIR_RXMBFA (1 << 3) + +#define KS_GRR 0x26 +#define GRR_QMU (1 << 1) +#define GRR_GSR (1 << 0) + +#define KS_WFCR 0x2A +#define WFCR_MPRXE (1 << 7) +#define WFCR_WF3E (1 << 3) +#define WFCR_WF2E (1 << 2) +#define WFCR_WF1E (1 << 1) +#define WFCR_WF0E (1 << 0) + +#define KS_WF0CRC0 0x30 +#define KS_WF0CRC1 0x32 +#define KS_WF0BM0 0x34 +#define KS_WF0BM1 0x36 +#define KS_WF0BM2 0x38 +#define KS_WF0BM3 0x3A + +#define KS_WF1CRC0 0x40 +#define KS_WF1CRC1 0x42 +#define KS_WF1BM0 0x44 +#define KS_WF1BM1 0x46 +#define KS_WF1BM2 0x48 +#define KS_WF1BM3 0x4A + +#define KS_WF2CRC0 0x50 +#define KS_WF2CRC1 0x52 +#define KS_WF2BM0 0x54 +#define KS_WF2BM1 0x56 +#define KS_WF2BM2 0x58 +#define KS_WF2BM3 0x5A + +#define KS_WF3CRC0 0x60 +#define KS_WF3CRC1 0x62 +#define KS_WF3BM0 0x64 +#define KS_WF3BM1 0x66 +#define KS_WF3BM2 0x68 +#define KS_WF3BM3 0x6A + +#define KS_TXCR 0x70 +#define TXCR_TCGICMP (1 << 8) +#define TXCR_TCGUDP (1 << 7) +#define TXCR_TCGTCP (1 << 6) +#define TXCR_TCGIP (1 << 5) +#define TXCR_FTXQ (1 << 4) +#define TXCR_TXFCE (1 << 3) +#define TXCR_TXPE (1 << 2) +#define TXCR_TXCRC (1 << 1) +#define TXCR_TXE (1 << 0) + +#define KS_TXSR 0x72 +#define TXSR_TXLC (1 << 13) +#define TXSR_TXMC (1 << 12) +#define TXSR_TXFID_MASK (0x3f << 0) +#define TXSR_TXFID_SHIFT (0) +#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) + +#define KS_RXCR1 
0x74 +#define RXCR1_FRXQ (1 << 15) +#define RXCR1_RXUDPFCC (1 << 14) +#define RXCR1_RXTCPFCC (1 << 13) +#define RXCR1_RXIPFCC (1 << 12) +#define RXCR1_RXPAFMA (1 << 11) +#define RXCR1_RXFCE (1 << 10) +#define RXCR1_RXEFE (1 << 9) +#define RXCR1_RXMAFMA (1 << 8) +#define RXCR1_RXBE (1 << 7) +#define RXCR1_RXME (1 << 6) +#define RXCR1_RXUE (1 << 5) +#define RXCR1_RXAE (1 << 4) +#define RXCR1_RXINVF (1 << 1) +#define RXCR1_RXE (1 << 0) + +#define KS_RXCR2 0x76 +#define RXCR2_SRDBL_MASK (0x7 << 5) +#define RXCR2_SRDBL_SHIFT (5) +#define RXCR2_SRDBL_4B (0x0 << 5) +#define RXCR2_SRDBL_8B (0x1 << 5) +#define RXCR2_SRDBL_16B (0x2 << 5) +#define RXCR2_SRDBL_32B (0x3 << 5) +#define RXCR2_SRDBL_FRAME (0x4 << 5) +#define RXCR2_IUFFP (1 << 4) +#define RXCR2_RXIUFCEZ (1 << 3) +#define RXCR2_UDPLFE (1 << 2) +#define RXCR2_RXICMPFCC (1 << 1) +#define RXCR2_RXSAF (1 << 0) + +#define KS_TXMIR 0x78 + +#define KS_RXFHSR 0x7C +#define RXFSHR_RXFV (1 << 15) +#define RXFSHR_RXICMPFCS (1 << 13) +#define RXFSHR_RXIPFCS (1 << 12) +#define RXFSHR_RXTCPFCS (1 << 11) +#define RXFSHR_RXUDPFCS (1 << 10) +#define RXFSHR_RXBF (1 << 7) +#define RXFSHR_RXMF (1 << 6) +#define RXFSHR_RXUF (1 << 5) +#define RXFSHR_RXMR (1 << 4) +#define RXFSHR_RXFT (1 << 3) +#define RXFSHR_RXFTL (1 << 2) +#define RXFSHR_RXRF (1 << 1) +#define RXFSHR_RXCE (1 << 0) + +#define KS_RXFHBCR 0x7E +#define KS_TXQCR 0x80 +#define TXQCR_AETFE (1 << 2) +#define TXQCR_TXQMAM (1 << 1) +#define TXQCR_METFE (1 << 0) + +#define KS_RXQCR 0x82 +#define RXQCR_RXDTTS (1 << 12) +#define RXQCR_RXDBCTS (1 << 11) +#define RXQCR_RXFCTS (1 << 10) +#define RXQCR_RXIPHTOE (1 << 9) +#define RXQCR_RXDTTE (1 << 7) +#define RXQCR_RXDBCTE (1 << 6) +#define RXQCR_RXFCTE (1 << 5) +#define RXQCR_ADRFE (1 << 4) +#define RXQCR_SDA (1 << 3) +#define RXQCR_RRXEF (1 << 0) + +#define KS_TXFDPR 0x84 +#define TXFDPR_TXFPAI (1 << 14) +#define TXFDPR_TXFP_MASK (0x7ff << 0) +#define TXFDPR_TXFP_SHIFT (0) + +#define KS_RXFDPR 0x86 +#define RXFDPR_RXFPAI (1 << 14) + +#define KS_RXDTTR 0x8C +#define KS_RXDBCTR 0x8E + +#define KS_IER 0x90 +#define KS_ISR 0x92 +#define IRQ_LCI (1 << 15) +#define IRQ_TXI (1 << 14) +#define IRQ_RXI (1 << 13) +#define IRQ_RXOI (1 << 11) +#define IRQ_TXPSI (1 << 9) +#define IRQ_RXPSI (1 << 8) +#define IRQ_TXSAI (1 << 6) +#define IRQ_RXWFDI (1 << 5) +#define IRQ_RXMPDI (1 << 4) +#define IRQ_LDI (1 << 3) +#define IRQ_EDI (1 << 2) +#define IRQ_SPIBEI (1 << 1) +#define IRQ_DEDI (1 << 0) + +#define KS_RXFCTR 0x9C +#define KS_RXFC 0x9D +#define RXFCTR_RXFC_MASK (0xff << 8) +#define RXFCTR_RXFC_SHIFT (8) +#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) +#define RXFCTR_RXFCT_MASK (0xff << 0) +#define RXFCTR_RXFCT_SHIFT (0) + +#define KS_TXNTFSR 0x9E + +#define KS_MAHTR0 0xA0 +#define KS_MAHTR1 0xA2 +#define KS_MAHTR2 0xA4 +#define KS_MAHTR3 0xA6 + +#define KS_FCLWR 0xB0 +#define KS_FCHWR 0xB2 +#define KS_FCOWR 0xB4 + +#define KS_CIDER 0xC0 +#define CIDER_ID 0x8870 +#define CIDER_REV_MASK (0x7 << 1) +#define CIDER_REV_SHIFT (1) +#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) + +#define KS_CGCR 0xC6 + +#define KS_IACR 0xC8 +#define IACR_RDEN (1 << 12) +#define IACR_TSEL_MASK (0x3 << 10) +#define IACR_TSEL_SHIFT (10) +#define IACR_TSEL_MIB (0x3 << 10) +#define IACR_ADDR_MASK (0x1f << 0) +#define IACR_ADDR_SHIFT (0) + +#define KS_IADLR 0xD0 +#define KS_IAHDR 0xD2 + +#define KS_PMECR 0xD4 +#define PMECR_PME_DELAY (1 << 14) +#define PMECR_PME_POL (1 << 12) +#define PMECR_WOL_WAKEUP (1 << 11) +#define PMECR_WOL_MAGICPKT (1 << 10) +#define PMECR_WOL_LINKUP (1 << 9) +#define 
PMECR_WOL_ENERGY (1 << 8) +#define PMECR_AUTO_WAKE_EN (1 << 7) +#define PMECR_WAKEUP_NORMAL (1 << 6) +#define PMECR_WKEVT_MASK (0xf << 2) +#define PMECR_WKEVT_SHIFT (2) +#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) +#define PMECR_WKEVT_ENERGY (0x1 << 2) +#define PMECR_WKEVT_LINK (0x2 << 2) +#define PMECR_WKEVT_MAGICPKT (0x4 << 2) +#define PMECR_WKEVT_FRAME (0x8 << 2) +#define PMECR_PM_MASK (0x3 << 0) +#define PMECR_PM_SHIFT (0) +#define PMECR_PM_NORMAL (0x0 << 0) +#define PMECR_PM_ENERGY (0x1 << 0) +#define PMECR_PM_SOFTDOWN (0x2 << 0) +#define PMECR_PM_POWERSAVE (0x3 << 0) + +/* Standard MII PHY data */ +#define KS_P1MBCR 0xE4 +#define KS_P1MBSR 0xE6 +#define KS_PHY1ILR 0xE8 +#define KS_PHY1IHR 0xEA +#define KS_P1ANAR 0xEC +#define KS_P1ANLPR 0xEE + +#define KS_P1SCLMD 0xF4 +#define P1SCLMD_LEDOFF (1 << 15) +#define P1SCLMD_TXIDS (1 << 14) +#define P1SCLMD_RESTARTAN (1 << 13) +#define P1SCLMD_DISAUTOMDIX (1 << 10) +#define P1SCLMD_FORCEMDIX (1 << 9) +#define P1SCLMD_AUTONEGEN (1 << 7) +#define P1SCLMD_FORCE100 (1 << 6) +#define P1SCLMD_FORCEFDX (1 << 5) +#define P1SCLMD_ADV_FLOW (1 << 4) +#define P1SCLMD_ADV_100BT_FDX (1 << 3) +#define P1SCLMD_ADV_100BT_HDX (1 << 2) +#define P1SCLMD_ADV_10BT_FDX (1 << 1) +#define P1SCLMD_ADV_10BT_HDX (1 << 0) + +#define KS_P1CR 0xF6 +#define P1CR_HP_MDIX (1 << 15) +#define P1CR_REV_POL (1 << 13) +#define P1CR_OP_100M (1 << 10) +#define P1CR_OP_FDX (1 << 9) +#define P1CR_OP_MDI (1 << 7) +#define P1CR_AN_DONE (1 << 6) +#define P1CR_LINK_GOOD (1 << 5) +#define P1CR_PNTR_FLOW (1 << 4) +#define P1CR_PNTR_100BT_FDX (1 << 3) +#define P1CR_PNTR_100BT_HDX (1 << 2) +#define P1CR_PNTR_10BT_FDX (1 << 1) +#define P1CR_PNTR_10BT_HDX (1 << 0) + +/* TX Frame control */ + +#define TXFR_TXIC (1 << 15) +#define TXFR_TXFID_MASK (0x3f << 0) +#define TXFR_TXFID_SHIFT (0) + +/* SPI frame opcodes */ +#define KS_SPIOP_RD (0x00) +#define KS_SPIOP_WR (0x40) +#define KS_SPIOP_RXFIFO (0x80) +#define KS_SPIOP_TXFIFO (0xC0) diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 5b5c25368d1..e3601cf3f93 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; unsigned int len, entry; u32 ctrl; + unsigned long flags; #ifdef DEBUG int i; @@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) #endif len = skb->len; - spin_lock_irq(&bp->lock); + spin_lock_irqsave(&bp->lock, flags); /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(bp) < 1) { netif_stop_queue(dev); - spin_unlock_irq(&bp->lock); + spin_unlock_irqrestore(&bp->lock, flags); dev_err(&bp->pdev->dev, "BUG! 
Tx Ring full when queue awake!\n"); dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", @@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) if (TX_BUFFS_AVAIL(bp) < 1) netif_stop_queue(dev); - spin_unlock_irq(&bp->lock); + spin_unlock_irqrestore(&bp->lock, flags); dev->trans_start = jiffies; diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index acd143da161..61eabcac734 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -179,7 +179,7 @@ static const struct net_device_ops macsonic_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int __init macsonic_init(struct net_device *dev) +static int __devinit macsonic_init(struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); @@ -223,7 +223,7 @@ static int __init macsonic_init(struct net_device *dev) return 0; } -static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev) +static int __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; @@ -288,7 +288,7 @@ static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev) } else return 0; } -static int __init mac_onboard_sonic_probe(struct net_device *dev) +static int __devinit mac_onboard_sonic_probe(struct net_device *dev) { /* Bwahahaha */ static int once_is_more_than_enough; @@ -409,7 +409,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev) return macsonic_init(dev); } -static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev, +static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev, unsigned long prom_addr, int id) { @@ -424,7 +424,7 @@ static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev, return 0; } -static int __init macsonic_ident(struct nubus_dev *ndev) +static int __devinit macsonic_ident(struct nubus_dev *ndev) { if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && ndev->dr_sw == NUBUS_DRSW_SONIC_LC) @@ -449,7 +449,7 @@ static int __init macsonic_ident(struct nubus_dev *ndev) return -1; } -static int __init mac_nubus_sonic_probe(struct net_device *dev) +static int __devinit mac_nubus_sonic_probe(struct net_device *dev) { static int slots; struct nubus_dev* ndev = NULL; @@ -562,7 +562,7 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev) return macsonic_init(dev); } -static int __init mac_sonic_probe(struct platform_device *pdev) +static int __devinit mac_sonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; @@ -575,6 +575,7 @@ static int __init mac_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); /* This will catch fatal stuff like -ENOMEM as well as success */ err = mac_onboard_sonic_probe(dev); diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index dc45e9856c3..6851bdb2ce2 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -14,6 +14,10 @@ #include <linux/mdio.h> #include <linux/module.h> +MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers"); +MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc."); +MODULE_LICENSE("GPL"); + /** * mdio45_probe - probe for an MDIO (clause 45) device * @mdio: MDIO interface diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c index 2845a0560b8..65ec77dc31f 100644 --- a/drivers/net/mlx4/cmd.c +++ b/drivers/net/mlx4/cmd.c @@ -80,7 +80,9 @@ enum { /* Bad management 
packet (silently discarded): */ CMD_STAT_BAD_PKT = 0x30, /* More outstanding CQEs in CQ than new CQ size: */ - CMD_STAT_BAD_SIZE = 0x40 + CMD_STAT_BAD_SIZE = 0x40, + /* Multi Function device support required: */ + CMD_STAT_MULTI_FUNC_REQ = 0x50, }; enum { @@ -128,6 +130,7 @@ static int mlx4_status_to_errno(u8 status) [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, [CMD_STAT_BAD_PKT] = -EINVAL, [CMD_STAT_BAD_SIZE] = -ENOMEM, + [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, }; if (status >= ARRAY_SIZE(trans_table) || diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c index 091f99052c9..86467b444ac 100644 --- a/drivers/net/mlx4/en_ethtool.c +++ b/drivers/net/mlx4/en_ethtool.c @@ -220,7 +220,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->autoneg = AUTONEG_DISABLE; cmd->supported = SUPPORTED_10000baseT_Full; - cmd->advertising = SUPPORTED_10000baseT_Full; + cmd->advertising = ADVERTISED_1000baseT_Full; if (netif_carrier_ok(dev)) { cmd->speed = SPEED_10000; cmd->duplex = DUPLEX_FULL; diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 91bdfdfd431..3ac0404d0d1 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -506,8 +506,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, PCI_DMA_FROMDEVICE); } /* Adjust size of last fragment to match actual length */ - skb_frags_rx[nr - 1].size = length - - priv->frag_info[nr - 1].frag_prefix_size; + if (nr > 0) + skb_frags_rx[nr - 1].size = length - + priv->frag_info[nr - 1].frag_prefix_size; return nr; fail: diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 08c43f2ae72..62208401c4d 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, pci_unmap_page(mdev->pdev, (dma_addr_t) be64_to_cpu(data->addr), frag->size, PCI_DMA_TODEVICE); + ++data; } } /* Stamp the freed descriptor */ @@ -436,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) { struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; + unsigned long flags; /* If we don't have a pending timer, set one up to catch our recent post in case the interface becomes idle */ @@ -444,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) - if (spin_trylock_irq(&ring->comp_lock)) { + if (spin_trylock_irqsave(&ring->comp_lock, flags)) { mlx4_en_process_tx_cq(priv->dev, cq); - spin_unlock_irq(&ring->comp_lock); + spin_unlock_irqrestore(&ring->comp_lock, flags); } } diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 018348c0119..dac621b1e9f 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -729,7 +729,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_QUERY_FW(dev); if (err) { - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + if (err == -EACCES) + mlx4_info(dev, "non-primary physical function, skipping.\n"); + else + mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); return err; } @@ -1285,6 +1288,7 @@ static struct pci_device_id mlx4_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6764) 
}, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ { 0, } }; diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index c9bfe4eea18..78c088331f5 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -130,8 +130,8 @@ static int full_duplex[MAX_UNITS]; static const char version[] __devinitconst = KERN_INFO DRV_NAME " dp8381x driver, version " DRV_VERSION ", " DRV_RELDATE "\n" - KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" - KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; + " originally by Donald Becker <becker@scyld.com>\n" + " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 5c3e242428f..992dbfffdb0 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -321,7 +321,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) } if (ei_debug && version_printed++ == 0) - printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2); + printk(KERN_INFO "%s%s", version1, version2); printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr); diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 970cedeb5f3..a9c1fcca5e7 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -60,7 +60,18 @@ #define _NETXEN_NIC_LINUX_SUBVERSION 30 #define NETXEN_NIC_LINUX_VERSIONID "4.0.30" -#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define NETXEN_DECODE_VERSION(v) \ + NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) #define NETXEN_NUM_FLASH_SECTORS (64) #define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) @@ -199,6 +210,7 @@ #define NETXEN_CTX_SIGNATURE 0xdee0 #define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 #define NETXEN_CTX_RESET 0xbad0 +#define NETXEN_CTX_D3_RESET 0xacc0 #define NETXEN_RCV_PRODUCER(ringid) (ringid) #define PHAN_PEG_RCV_INITIALIZED 0xff01 @@ -614,6 +626,7 @@ struct netxen_new_user_info { #define NX_P2_MN_ROMIMAGE 0 #define NX_P3_CT_ROMIMAGE 1 #define NX_P3_MN_ROMIMAGE 2 +#define NX_FLASH_ROMIMAGE 3 #define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */ @@ -761,6 +774,8 @@ struct nx_host_tx_ring { u32 crb_cmd_consumer; u32 num_desc; + struct netdev_queue *txq; + struct netxen_cmd_buffer *cmd_buf_arr; struct cmd_desc_type0 *desc_head; dma_addr_t phys_addr; @@ -1239,11 +1254,11 @@ struct netxen_adapter { u8 mc_enabled; u8 max_mc_count; u8 rss_supported; - u8 resv2; + u8 link_changed; u32 resv3; u8 has_link_events; - u8 resv1; + u8 fw_type; u16 tx_context_id; u16 mtu; u16 is_up; @@ -1387,6 +1402,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter); int netxen_initialize_adapter_offload(struct netxen_adapter *adapter); int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); int netxen_load_firmware(struct netxen_adapter *adapter); +int netxen_need_fw_reset(struct netxen_adapter *adapter); void netxen_request_firmware(struct netxen_adapter *adapter); void netxen_release_firmware(struct netxen_adapter *adapter); int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); diff --git 
a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 4754f5cffad..9f8ae4719e2 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -684,10 +684,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) goto err_out_free; } else { err = netxen_init_old_ctx(adapter); - if (err) { - netxen_free_hw_resources(adapter); - return err; - } + if (err) + goto err_out_free; } return 0; @@ -708,15 +706,18 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter) int port = adapter->portnum; if (adapter->fw_major >= 4) { - nx_fw_cmd_destroy_tx_ctx(adapter); nx_fw_cmd_destroy_rx_ctx(adapter); + nx_fw_cmd_destroy_tx_ctx(adapter); } else { netxen_api_lock(adapter); NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), - NETXEN_CTX_RESET | port); + NETXEN_CTX_D3_RESET | port); netxen_api_unlock(adapter); } + /* Allow dma queues to drain after context reset */ + msleep(20); + recv_ctx = &adapter->recv_ctx; if (recv_ctx->hwctx != NULL) { diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 3cc047844af..82410367564 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h @@ -853,6 +853,7 @@ enum { #define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) +#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) #define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index ce3b89d2cbb..b9123d445c9 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -461,13 +461,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, i = 0; tx_ring = adapter->tx_ring; - netif_tx_lock_bh(adapter->netdev); + __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; consumer = tx_ring->sw_consumer; - if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) { - netif_tx_unlock_bh(adapter->netdev); + if (nr_desc >= netxen_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + __netif_tx_unlock_bh(tx_ring->txq); return -EBUSY; } @@ -490,7 +491,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, netxen_nic_update_cmd_producer(adapter, tx_ring); - netif_tx_unlock_bh(adapter->netdev); + __netif_tx_unlock_bh(tx_ring->txq); return 0; } diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 055bb61d6e7..5d3343ef3d8 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -214,6 +214,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) adapter->tx_ring = tx_ring; tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { @@ -684,11 +685,84 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) } int +netxen_need_fw_reset(struct netxen_adapter *adapter) +{ + u32 count, old_count; + u32 val, version, major, minor, build; + int i, timeout; + u8 fw_type; + + /* NX2031 firmware doesn't support heartbit */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + /* last attempt had failed */ + if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) + return 1; + + old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + + for (i = 0; i < 10; i++) { + + timeout = 
msleep_interruptible(200); + if (timeout) { + NXWR32(adapter, CRB_CMDPEG_STATE, + PHAN_INITIALIZE_FAILED); + return -EINTR; + } + + count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + if (count != old_count) + break; + } + + /* firmware is dead */ + if (count == old_count) + return 1; + + /* check if we have got newer or different file firmware */ + if (adapter->fw) { + + const struct firmware *fw = adapter->fw; + + val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); + version = NETXEN_DECODE_VERSION(val); + + major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + + if (version > NETXEN_VERSION_CODE(major, minor, build)) + return 1; + + if (version == NETXEN_VERSION_CODE(major, minor, build)) { + + val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); + fw_type = (val & 0x4) ? + NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE; + + if (adapter->fw_type != fw_type) + return 1; + } + } + + return 0; +} + +static char *fw_name[] = { + "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash", +}; + +int netxen_load_firmware(struct netxen_adapter *adapter) { u64 *ptr64; u32 i, flashaddr, size; const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); @@ -756,7 +830,7 @@ static int netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) { __le32 val; - u32 major, minor, build, ver, min_ver, bios; + u32 ver, min_ver, bios; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; @@ -768,21 +842,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) return -EINVAL; val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); - major = (__force u32)val & 0xff; - minor = ((__force u32)val >> 8) & 0xff; - build = (__force u32)val >> 16; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) min_ver = NETXEN_VERSION_CODE(4, 0, 216); else min_ver = NETXEN_VERSION_CODE(3, 4, 216); - ver = NETXEN_VERSION_CODE(major, minor, build); + ver = NETXEN_DECODE_VERSION(val); - if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { + if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", - fwname, major, minor, build); + fwname, _major(ver), _minor(ver), _build(ver)); return -EINVAL; } @@ -798,22 +869,21 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) if (netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&val)) return -EIO; - major = (__force u32)val & 0xff; - minor = ((__force u32)val >> 8) & 0xff; - build = (__force u32)val >> 16; - if (NETXEN_VERSION_CODE(major, minor, build) > ver) + val = NETXEN_DECODE_VERSION(val); + if (val > ver) { + dev_info(&pdev->dev, "%s: firmware is older than flash\n", + fwname); return -EINVAL; + } NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); return 0; } -static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" }; - void netxen_request_firmware(struct netxen_adapter *adapter) { u32 capability, flashed_ver; - int fw_type; + u8 fw_type; struct pci_dev *pdev = adapter->pdev; int rc = 0; @@ -830,6 +900,8 @@ request_mn: netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + if (flashed_ver >= NETXEN_VERSION_CODE(4, 
0, 220)) { capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); if (capability & NX_PEG_TUNE_MN_PRESENT) { @@ -838,6 +910,10 @@ request_mn: } } + fw_type = NX_FLASH_ROMIMAGE; + adapter->fw = NULL; + goto done; + request_fw: rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev); if (rc != 0) { @@ -846,6 +922,7 @@ request_fw: goto request_mn; } + fw_type = NX_FLASH_ROMIMAGE; adapter->fw = NULL; goto done; } @@ -859,16 +936,13 @@ request_fw: goto request_mn; } + fw_type = NX_FLASH_ROMIMAGE; adapter->fw = NULL; goto done; } done: - if (adapter->fw) - dev_info(&pdev->dev, "loading firmware from file %s\n", - fw_name[fw_type]); - else - dev_info(&pdev->dev, "loading firmware from flash\n"); + adapter->fw_type = fw_type; } @@ -1327,10 +1401,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - netif_tx_lock(netdev); + __netif_tx_lock(tx_ring->txq, smp_processor_id()); if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_wake_queue(netdev); - netif_tx_unlock(netdev); + __netif_tx_unlock(tx_ring->txq); } } /* diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 2919a2d12bf..28f270f5ac7 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -94,10 +94,6 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = { MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); -static struct workqueue_struct *netxen_workq; -#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp) -#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq) - static void netxen_watchdog(unsigned long); static uint32_t crb_cmd_producer[4] = { @@ -171,6 +167,8 @@ netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) { if (recv_ctx->sds_rings != NULL) kfree(recv_ctx->sds_rings); + + recv_ctx->sds_rings = NULL; } static int @@ -193,6 +191,21 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) } static void +netxen_napi_del(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + netxen_free_sds_rings(&adapter->recv_ctx); +} + +static void netxen_napi_enable(struct netxen_adapter *adapter) { int ring; @@ -215,13 +228,13 @@ netxen_napi_disable(struct netxen_adapter *adapter) for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - napi_disable(&sds_ring->napi); netxen_nic_disable_int(sds_ring); - synchronize_irq(sds_ring->irq); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); } } -static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) +static int nx_set_dma_mask(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; uint64_t mask, cmask; @@ -229,19 +242,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) adapter->pci_using_dac = 0; mask = DMA_BIT_MASK(32); - /* - * Consistent DMA mask is set to 32 bit because it cannot be set to - * 35 bits. For P3 also leave it at 32 bits for now. Only the rings - * come off this pool. 
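The reworked netxen_request_firmware() flow above treats a failed or skipped request_firmware() as a fall-back to the image already in flash, recording NX_FLASH_ROMIMAGE in fw_type so the loader can report where the image came from. A hedged sketch of that selection, assuming the driver's types as seen in the hunks (pick_firmware() is a hypothetical helper, not a driver function):

#include <linux/firmware.h>
#include <linux/pci.h>

/*
 * Hypothetical helper; assumes it sits in netxen_nic_init.c where
 * struct netxen_adapter, fw_name[] and NX_FLASH_ROMIMAGE are visible.
 */
static void pick_firmware(struct netxen_adapter *adapter, u8 file_type)
{
	struct pci_dev *pdev = adapter->pdev;

	if (request_firmware(&adapter->fw, fw_name[file_type],
			     &pdev->dev) == 0) {
		adapter->fw_type = file_type;	/* use the firmware file */
		return;
	}

	/* No usable file: fall back to whatever is already in flash. */
	adapter->fw = NULL;
	adapter->fw_type = NX_FLASH_ROMIMAGE;
}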
- */ cmask = DMA_BIT_MASK(32); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { #ifndef CONFIG_IA64 - if (revision_id >= NX_P3_B0) - mask = DMA_BIT_MASK(39); - else if (revision_id == NX_P2_C1) mask = DMA_BIT_MASK(35); #endif + } else { + mask = DMA_BIT_MASK(39); + cmask = mask; + } + if (pci_set_dma_mask(pdev, mask) == 0 && pci_set_consistent_dma_mask(pdev, cmask) == 0) { adapter->pci_using_dac = 1; @@ -256,13 +267,13 @@ static int nx_update_dma_mask(struct netxen_adapter *adapter) { int change, shift, err; - uint64_t mask, old_mask; + uint64_t mask, old_mask, old_cmask; struct pci_dev *pdev = adapter->pdev; change = 0; shift = NXRD32(adapter, CRB_DMA_SHIFT); - if (shift >= 32) + if (shift > 32) return 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) @@ -272,14 +283,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter) if (change) { old_mask = pdev->dma_mask; - mask = (1ULL<<(32+shift)) - 1; + old_cmask = pdev->dev.coherent_dma_mask; + + mask = DMA_BIT_MASK(32+shift); err = pci_set_dma_mask(pdev, mask); if (err) - return pci_set_dma_mask(pdev, old_mask); + goto err_out; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + err = pci_set_consistent_dma_mask(pdev, mask); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); } return 0; + +err_out: + pci_set_dma_mask(pdev, old_mask); + pci_set_consistent_dma_mask(pdev, old_cmask); + return err; } static void netxen_check_options(struct netxen_adapter *adapter) @@ -718,6 +744,10 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) if (request_fw) netxen_request_firmware(adapter); + err = netxen_need_fw_reset(adapter); + if (err <= 0) + return err; + if (first_boot != 0x55555555) { NXWR32(adapter, CRB_CMDPEG_STATE, 0); netxen_pinit_from_rom(adapter, 0); @@ -829,11 +859,11 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) adapter->ahw.linkup = 0; - netxen_napi_enable(adapter); - if (adapter->max_sds_rings > 1) netxen_config_rss(adapter, 1); + netxen_napi_enable(adapter); + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) netxen_linkevent_request(adapter, 1); else @@ -847,8 +877,9 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) static void netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) { + spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); - netif_stop_queue(netdev); + netif_tx_disable(netdev); if (adapter->stop_port) adapter->stop_port(adapter); @@ -859,8 +890,8 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) netxen_napi_disable(adapter); netxen_release_tx_buffers(adapter); + spin_unlock(&adapter->tx_clean_lock); - FLUSH_SCHEDULED_WORK(); del_timer_sync(&adapter->watchdog_timer); } @@ -875,10 +906,12 @@ netxen_nic_attach(struct netxen_adapter *adapter) struct nx_host_tx_ring *tx_ring; err = netxen_init_firmware(adapter); - if (err != 0) { - printk(KERN_ERR "Failed to init firmware\n"); - return -EIO; - } + if (err) + return err; + + err = netxen_napi_add(adapter, netdev); + if (err) + return err; if (adapter->fw_major < 4) adapter->max_rds_rings = 3; @@ -939,9 +972,10 @@ err_out_free_sw: static void netxen_nic_detach(struct netxen_adapter *adapter) { - netxen_release_rx_buffers(adapter); netxen_free_hw_resources(adapter); + netxen_release_rx_buffers(adapter); netxen_nic_free_irq(adapter); + netxen_napi_del(adapter); netxen_free_sw_resources(adapter); adapter->is_up = 0; @@ -1000,7 +1034,7 @@ netxen_nic_probe(struct 
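nx_update_dma_mask() above now remembers both the streaming and the coherent DMA masks and restores them if widening either one fails. A minimal reconstruction of that error path using only the standard PCI DMA API (widen_dma_mask() is an illustrative name, not the driver function):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int widen_dma_mask(struct pci_dev *pdev, int shift)
{
	u64 old_mask  = pdev->dma_mask;
	u64 old_cmask = pdev->dev.coherent_dma_mask;
	u64 mask = DMA_BIT_MASK(32 + shift);
	int err;

	err = pci_set_dma_mask(pdev, mask);
	if (err)
		goto err_out;

	err = pci_set_consistent_dma_mask(pdev, mask);
	if (err)
		goto err_out;

	dev_info(&pdev->dev, "using %d-bit dma mask\n", 32 + shift);
	return 0;

err_out:
	/* Put the previous, known-good masks back before failing. */
	pci_set_dma_mask(pdev, old_mask);
	pci_set_consistent_dma_mask(pdev, old_cmask);
	return err;
}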
pci_dev *pdev, const struct pci_device_id *ent) revision_id = pdev->revision; adapter->ahw.revision_id = revision_id; - err = nx_set_dma_mask(adapter, revision_id); + err = nx_set_dma_mask(adapter); if (err) goto err_out_free_netdev; @@ -1086,9 +1120,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->irq = adapter->msix_entries[0].vector; - if (netxen_napi_add(adapter, netdev)) - goto err_out_disable_msi; - init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &netxen_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; @@ -1158,6 +1189,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) unregister_netdev(netdev); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->tx_timeout_task); + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { netxen_nic_detach(adapter); } @@ -1166,7 +1200,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) netxen_free_adapter_offload(adapter); netxen_teardown_intr(adapter); - netxen_free_sds_rings(&adapter->recv_ctx); netxen_cleanup_pci_map(adapter); @@ -1192,6 +1225,9 @@ netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) if (netif_running(netdev)) netxen_nic_down(adapter, netdev); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->tx_timeout_task); + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) netxen_nic_detach(adapter); @@ -1529,10 +1565,7 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter) printk(KERN_ALERT "%s: Device temperature %d degrees C exceeds" " maximum allowed. Hardware has been shut down.\n", - netxen_nic_driver_name, temp_val); - - netif_carrier_off(netdev); - netif_stop_queue(netdev); + netdev->name, temp_val); rv = 1; } else if (temp_state == NX_TEMP_WARN) { if (adapter->temp == NX_TEMP_NORMAL) { @@ -1540,13 +1573,13 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter) "%s: Device temperature %d degrees C " "exceeds operating range." 
" Immediate action needed.\n", - netxen_nic_driver_name, temp_val); + netdev->name, temp_val); } } else { if (adapter->temp == NX_TEMP_WARN) { printk(KERN_INFO "%s: Device temperature is now %d degrees C" - " in normal range.\n", netxen_nic_driver_name, + " in normal range.\n", netdev->name, temp_val); } } @@ -1566,10 +1599,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) netif_carrier_off(netdev); netif_stop_queue(netdev); } - - if (!adapter->has_link_events) - netxen_nic_set_link_parameters(adapter); - + adapter->link_changed = !adapter->has_link_events; } else if (!adapter->ahw.linkup && linkup) { printk(KERN_INFO "%s: %s NIC Link is up\n", netxen_nic_driver_name, netdev->name); @@ -1578,9 +1608,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) netif_carrier_on(netdev); netif_wake_queue(netdev); } - - if (!adapter->has_link_events) - netxen_nic_set_link_parameters(adapter); + adapter->link_changed = !adapter->has_link_events; } } @@ -1607,11 +1635,36 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) netxen_advert_link_change(adapter, linkup); } +static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + netxen_nic_down(adapter, netdev); + netxen_nic_detach(adapter); +} + static void netxen_watchdog(unsigned long v) { struct netxen_adapter *adapter = (struct netxen_adapter *)v; - SCHEDULE_WORK(&adapter->watchdog_task); + if (netxen_nic_check_temp(adapter)) + goto do_sched; + + if (!adapter->has_link_events) { + netxen_nic_handle_phy_intr(adapter); + + if (adapter->link_changed) + goto do_sched; + } + + if (netif_running(adapter->netdev)) + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); + + return; + +do_sched: + schedule_work(&adapter->watchdog_task); } void netxen_watchdog_task(struct work_struct *work) @@ -1619,11 +1672,13 @@ void netxen_watchdog_task(struct work_struct *work) struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, watchdog_task); - if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter)) + if (adapter->temp == NX_TEMP_PANIC) { + netxen_nic_thermal_shutdown(adapter); return; + } - if (!adapter->has_link_events) - netxen_nic_handle_phy_intr(adapter); + if (adapter->link_changed) + netxen_nic_set_link_parameters(adapter); if (netif_running(adapter->netdev)) mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); @@ -1631,9 +1686,8 @@ void netxen_watchdog_task(struct work_struct *work) static void netxen_tx_timeout(struct net_device *netdev) { - struct netxen_adapter *adapter = (struct netxen_adapter *) - netdev_priv(netdev); - SCHEDULE_WORK(&adapter->tx_timeout_task); + struct netxen_adapter *adapter = netdev_priv(netdev); + schedule_work(&adapter->tx_timeout_task); } static void netxen_tx_timeout_task(struct work_struct *work) @@ -1641,6 +1695,9 @@ static void netxen_tx_timeout_task(struct work_struct *work) struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, tx_timeout_task); + if (!netif_running(adapter->netdev)) + return; + printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", netxen_nic_driver_name, adapter->netdev->name); @@ -1753,7 +1810,8 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) if ((work_done < budget) && tx_complete) { napi_complete(&sds_ring->napi); - netxen_nic_enable_int(sds_ring); + if (netif_running(adapter->netdev)) + netxen_nic_enable_int(sds_ring); } return work_done; @@ -1786,9 +1844,6 @@ 
static int __init netxen_init_module(void) { printk(KERN_INFO "%s\n", netxen_nic_driver_string); - if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL) - return -ENOMEM; - return pci_register_driver(&netxen_driver); } @@ -1797,7 +1852,6 @@ module_init(netxen_init_module); static void __exit netxen_exit_module(void) { pci_unregister_driver(&netxen_driver); - destroy_workqueue(netxen_workq); } module_exit(netxen_exit_module); diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 8c1f6988f39..89f7b2ad523 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -105,7 +105,7 @@ IVc. Errata static char version[] __devinitdata = KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n" -KERN_INFO " Support available from http://foo.com/bar/baz.html\n"; +" Support available from http://foo.com/bar/baz.html\n"; /* define to 1 to enable PIO instead of MMIO */ #undef USE_IO_OPS diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index ec7cf5ac4f0..690b9c76d34 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c @@ -156,6 +156,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev); static int el3_rx(struct net_device *dev); static int el3_close(struct net_device *dev); static void el3_tx_timeout(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; @@ -488,8 +489,7 @@ static void tc589_reset(struct net_device *dev) /* Switch to register set 1 for normal use. */ EL3WINDOW(1); - /* Accept b-cast and phys addr only. */ - outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); + set_rx_mode(dev); outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. 
*/ @@ -700,7 +700,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) if (fifo_diag & 0x2000) { /* Rx underrun */ tc589_wait_for_completion(dev, RxReset); - set_multicast_list(dev); + set_rx_mode(dev); outw(RxEnable, ioaddr + EL3_CMD); } outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); @@ -905,14 +905,11 @@ static int el3_rx(struct net_device *dev) return 0; } -static void set_multicast_list(struct net_device *dev) +static void set_rx_mode(struct net_device *dev) { - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; u16 opts = SetRxFilter | RxStation | RxBroadcast; - if (!pcmcia_dev_present(link)) return; if (dev->flags & IFF_PROMISC) opts |= RxMulticast | RxProm; else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) @@ -920,6 +917,16 @@ static void set_multicast_list(struct net_device *dev) outw(opts, ioaddr + EL3_CMD); } +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&priv->lock, flags); +} + static int el3_close(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c index f51944b28cf..06618af1a46 100644 --- a/drivers/net/pcmcia/ibmtr_cs.c +++ b/drivers/net/pcmcia/ibmtr_cs.c @@ -298,14 +298,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) strcpy(info->node.dev_name, dev->name); - printk(KERN_INFO "%s: port %#3lx, irq %d,", - dev->name, dev->base_addr, dev->irq); - printk (" mmio %#5lx,", (u_long)ti->mmio); - printk (" sram %#5lx,", (u_long)ti->sram_base << 12); - printk ("\n" KERN_INFO " hwaddr="); - for (i = 0; i < TR_ALEN; i++) - printk("%02X", dev->dev_addr[i]); - printk("\n"); + printk(KERN_INFO + "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", + dev->name, dev->base_addr, dev->irq, + (u_long)ti->mmio, (u_long)(ti->sram_base << 12), + dev->dev_addr); return 0; cs_failed: diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 02ef63ed1f9..36de91baf23 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -1425,15 +1425,12 @@ static void BuildLAF(int *ladrf, int *adr) ladrf[byte] |= (1 << (hashcode & 7)); #ifdef PCMCIA_DEBUG - if (pc_debug > 2) { - printk(KERN_DEBUG " adr ="); - for (i = 0; i < 6; i++) - printk(" %02X", adr[i]); - printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]" - " =", hashcode); - for (i = 0; i < 8; i++) - printk(" %02X", ladrf[i]); - printk("\n"); + if (pc_debug > 2) + printk(KERN_DEBUG " adr =%pM\n", adr); + printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); + for (i = 0; i < 8; i++) + printk(KERN_CONT " %02X", ladrf[i]); + printk(KERN_CONT "\n"); } #endif } /* BuildLAF */ diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 652a3688836..9ef1c1bfa83 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826), PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet 
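The 3c589 hunks split the receive-filter programming into a lock-free set_rx_mode() that the interrupt handler can call directly, and turn the ->set_multicast_list callback into a thin locked wrapper around it. A stripped-down sketch of that pattern; do_set_rx_filter() and card_priv are placeholders for the driver's register writes and private struct:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct card_priv {			/* placeholder private struct */
	spinlock_t lock;
};

static void do_set_rx_filter(struct net_device *dev)
{
	/* placeholder: program RxStation/RxBroadcast/RxMulticast/RxProm */
}

/* ->set_multicast_list entry point: just the lock plus the helper. */
static void card_set_multicast_list(struct net_device *dev)
{
	struct card_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	do_set_rx_filter(dev);		/* same helper the IRQ path calls */
	spin_unlock_irqrestore(&priv->lock, flags);
}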
NE2000 Compatible", 0xe3e66e22, 0xb96150df), PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 1c35e1d637a..23e1a0750fe 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -485,7 +485,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, &new_ring_dma_addr); if (new_tx_ring == NULL) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Consistent memory allocation failed.\n", dev->name); return; @@ -496,7 +496,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, GFP_ATOMIC); if (!new_dma_addr_list) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name); goto free_new_tx_ring; } @@ -505,7 +505,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, GFP_ATOMIC); if (!new_skb_list) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name); goto free_new_lists; } @@ -563,7 +563,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, &new_ring_dma_addr); if (new_rx_ring == NULL) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Consistent memory allocation failed.\n", dev->name); return; @@ -574,7 +574,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, GFP_ATOMIC); if (!new_dma_addr_list) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name); goto free_new_rx_ring; } @@ -583,7 +583,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, GFP_ATOMIC); if (!new_skb_list) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR + printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name); goto free_new_lists; } @@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { a = &pcnet32_dwio; - } else + } else { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX "No access methods\n"); goto err_release_region; + } } chip_version = @@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) ret = -ENOMEM; goto err_release_region; } - SET_NETDEV_DEV(dev, &pdev->dev); + + if (pdev) + SET_NETDEV_DEV(dev, &pdev->dev); if (pcnet32_debug & NETIF_MSG_PROBE) printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); @@ -1766,38 +1771,38 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* Version 0x2623 and 0x2624 */ if (((chip_version + 1) & 0xfffe) == 0x2624) { i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ - printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); + printk(KERN_INFO " tx_start_pt(0x%04x):", i); switch (i >> 10) { case 0: - printk(" 20 bytes,"); + printk(KERN_CONT " 20 bytes,"); break; case 1: - printk(" 64 bytes,"); + printk(KERN_CONT " 64 bytes,"); break; case 2: - printk(" 128 bytes,"); + printk(KERN_CONT " 128 bytes,"); break; case 3: - printk("~220 bytes,"); + printk(KERN_CONT "~220 bytes,"); break; } i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ - printk(" BCR18(%x):", i & 0xffff); + printk(KERN_CONT " BCR18(%x):", i & 0xffff); if (i & (1 << 5)) - printk("BurstWrEn "); + printk(KERN_CONT "BurstWrEn "); if (i & (1 << 6)) - printk("BurstRdEn "); + printk(KERN_CONT "BurstRdEn "); if (i & (1 << 7)) - printk("DWordIO "); + 
printk(KERN_CONT "DWordIO "); if (i & (1 << 11)) - printk("NoUFlow "); + printk(KERN_CONT "NoUFlow "); i = a->read_bcr(ioaddr, 25); - printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); + printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8); i = a->read_bcr(ioaddr, 26); - printk(" SRAM_BND=0x%04x,", i << 8); + printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8); i = a->read_bcr(ioaddr, 27); if (i & (1 << 14)) - printk("LowLatRx"); + printk(KERN_CONT "LowLatRx"); } } @@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) spin_lock_init(&lp->lock); - SET_NETDEV_DEV(dev, &pdev->dev); lp->name = chipname; lp->shared_irq = shared; lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ @@ -1835,7 +1839,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->chip_version = chip_version; lp->msg_enable = pcnet32_debug; if ((cards_found >= MAX_UNITS) - || (options[cards_found] > sizeof(options_mapping))) + || (options[cards_found] >= sizeof(options_mapping))) lp->options = PCNET32_PORT_ASEL; else lp->options = options_mapping[options[cards_found]]; @@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) lp->options |= PCNET32_PORT_FD; - if (!a) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "No access methods\n"); - ret = -ENODEV; - goto err_free_consistent; - } lp->a = *a; /* prior to register_netdev, dev->name is not yet correct */ @@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) return 0; - err_free_ring: +err_free_ring: pcnet32_free_ring(dev); - err_free_consistent: pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), lp->init_block, lp->init_dma_addr); - err_free_netdev: +err_free_netdev: free_netdev(dev); - err_release_region: +err_release_region: release_region(ioaddr, PCNET32_TOTAL_SIZE); return ret; } @@ -1996,7 +1993,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) &lp->tx_ring_dma_addr); if (lp->tx_ring == NULL) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Consistent memory allocation failed.\n", name); return -ENOMEM; @@ -2008,7 +2005,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) &lp->rx_ring_dma_addr); if (lp->rx_ring == NULL) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Consistent memory allocation failed.\n", name); return -ENOMEM; @@ -2018,7 +2015,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) GFP_ATOMIC); if (!lp->tx_dma_addr) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } @@ -2027,7 +2024,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) GFP_ATOMIC); if (!lp->rx_dma_addr) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } @@ -2036,7 +2033,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) GFP_ATOMIC); if (!lp->tx_skbuff) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } @@ -2045,7 +2042,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) GFP_ATOMIC); if (!lp->rx_skbuff) { if (netif_msg_drv(lp)) - printk("\n" KERN_ERR PFX + printk(KERN_ERR PFX "%s: Memory allocation failed.\n", name); 
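The pcnet32 changes above drop the embedded "\n" KERN_ERR/KERN_INFO prefixes and switch the follow-on printk() calls to KERN_CONT, because every fragment after the first must use KERN_CONT (and no other log level) to stay on the same log line. A tiny illustrative helper, not driver code:

#include <linux/kernel.h>

static void show_bcr18(unsigned int bcr18)
{
	/* The first fragment opens the line and sets its log level... */
	printk(KERN_INFO " BCR18(%x):", bcr18 & 0xffff);

	/* ...every later fragment must use KERN_CONT to stay on it. */
	if (bcr18 & (1 << 5))
		printk(KERN_CONT "BurstWrEn ");
	if (bcr18 & (1 << 6))
		printk(KERN_CONT "BurstRdEn ");
	printk(KERN_CONT "\n");
}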
return -ENOMEM; } @@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev) static int pcnet32_open(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); + struct pci_dev *pdev = lp->pci_dev; unsigned long ioaddr = dev->base_addr; u16 val; int i; @@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev) lp->a.write_csr(ioaddr, 124, val); /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ - if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && - (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || - lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && + (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || + pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { if (lp->options & PCNET32_PORT_ASEL) { lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; if (netif_msg_link(lp)) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33984b73723..22cdd451fb8 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -30,6 +30,7 @@ #ifdef CONFIG_OF_GPIO #include <linux/of_gpio.h> +#include <linux/of_mdio.h> #include <linux/of_platform.h> #endif @@ -81,13 +82,12 @@ static struct mdiobb_ops mdio_gpio_ops = { .get_mdio_data = mdio_get, }; -static int __devinit mdio_gpio_bus_init(struct device *dev, +static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, struct mdio_gpio_platform_data *pdata, int bus_id) { struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; - int ret = -ENOMEM; int i; bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); @@ -104,8 +104,6 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, new_bus->name = "GPIO Bitbanged MDIO", - ret = -ENODEV; - new_bus->phy_mask = pdata->phy_mask; new_bus->irq = pdata->irqs; new_bus->parent = dev; @@ -129,15 +127,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, dev_set_drvdata(dev, new_bus); - ret = mdiobus_register(new_bus); - if (ret) - goto out_free_all; - - return 0; + return new_bus; -out_free_all: - dev_set_drvdata(dev, NULL); - gpio_free(bitbang->mdio); out_free_mdc: gpio_free(bitbang->mdc); out_free_bus: @@ -145,30 +136,47 @@ out_free_bus: out_free_bitbang: kfree(bitbang); out: - return ret; + return NULL; } -static void __devexit mdio_gpio_bus_destroy(struct device *dev) +static void __devinit mdio_gpio_bus_deinit(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); struct mdio_gpio_info *bitbang = bus->priv; - mdiobus_unregister(bus); - free_mdio_bitbang(bus); dev_set_drvdata(dev, NULL); - gpio_free(bitbang->mdc); gpio_free(bitbang->mdio); + gpio_free(bitbang->mdc); + free_mdio_bitbang(bus); kfree(bitbang); } +static void __devexit mdio_gpio_bus_destroy(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + mdio_gpio_bus_deinit(dev); +} + static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; + struct mii_bus *new_bus; + int ret; if (!pdata) return -ENODEV; - return mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + if (!new_bus) + return -ENODEV; + + ret = mdiobus_register(new_bus); + if (ret) + mdio_gpio_bus_deinit(&pdev->dev); + + return ret; } static int __devexit mdio_gpio_remove(struct platform_device *pdev) @@ -179,29 +187,12 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev) 
} #ifdef CONFIG_OF_GPIO -static void __devinit add_phy(struct mdio_gpio_platform_data *pdata, - struct device_node *np) -{ - const u32 *data; - int len, id, irq; - - data = of_get_property(np, "reg", &len); - if (!data || len != 4) - return; - - id = *data; - pdata->phy_mask &= ~(1 << id); - - irq = of_irq_to_resource(np, 0, NULL); - if (irq) - pdata->irqs[id] = irq; -} static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = NULL; struct mdio_gpio_platform_data *pdata; + struct mii_bus *new_bus; int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -215,14 +206,18 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, ret = of_get_gpio(ofdev->node, 1); if (ret < 0) - goto out_free; + goto out_free; pdata->mdio = ret; - while ((np = of_get_next_child(ofdev->node, np))) - if (!strcmp(np->type, "ethernet-phy")) - add_phy(pdata, np); + new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); + if (!new_bus) + return -ENODEV; - return mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); + ret = of_mdiobus_register(new_bus, ofdev->node); + if (ret) + mdio_gpio_bus_deinit(&ofdev->dev); + + return ret; out_free: kfree(pdata); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 61755cbd978..eda94fcd406 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -928,13 +928,32 @@ static void phy_state_machine(struct work_struct *work) * Otherwise, it's 0, and we're * still waiting for AN */ if (err > 0) { - phydev->state = PHY_RUNNING; + err = phy_read_status(phydev); + if (err) + break; + + if (phydev->link) { + phydev->state = PHY_RUNNING; + netif_carrier_on(phydev->attached_dev); + } else + phydev->state = PHY_NOLINK; + phydev->adjust_link(phydev->attached_dev); } else { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; } - } else - phydev->state = PHY_RUNNING; + } else { + err = phy_read_status(phydev); + if (err) + break; + + if (phydev->link) { + phydev->state = PHY_RUNNING; + netif_carrier_on(phydev->attached_dev); + } else + phydev->state = PHY_NOLINK; + phydev->adjust_link(phydev->attached_dev); + } break; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index eba937c4637..b10fedd8214 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -134,8 +134,10 @@ int phy_scan_fixups(struct phy_device *phydev) err = fixup->run(phydev); - if (err < 0) + if (err < 0) { + mutex_unlock(&phy_fixup_lock); return err; + } } } mutex_unlock(&phy_fixup_lock); diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 7a62f781fef..2ca8b0d84ee 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -270,6 +270,9 @@ static const struct net_device_ops plip_netdev_ops = { .ndo_stop = plip_close, .ndo_start_xmit = plip_tx_packet, .ndo_do_ioctl = plip_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; /* Entry point of PLIP driver. 
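The phy.c hunk above makes the state machine re-read the link once auto-negotiation finishes and only enter PHY_RUNNING (raising the carrier) when the link is really up, dropping to PHY_NOLINK otherwise. A condensed sketch of that branch; the wrapper name is invented, while the phydev fields are the ones used in the hunk:

#include <linux/phy.h>
#include <linux/netdevice.h>

static int an_done_update_state(struct phy_device *phydev)
{
	int err = phy_read_status(phydev);

	if (err)
		return err;

	if (phydev->link) {
		phydev->state = PHY_RUNNING;
		netif_carrier_on(phydev->attached_dev);
	} else {
		phydev->state = PHY_NOLINK;
	}

	phydev->adjust_link(phydev->attached_dev);
	return 0;
}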
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 17c116bb332..6de8399d6dd 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -356,6 +356,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); ap_put(ap); + tty_unthrottle(tty); } static void diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 639d11bc444..cd37d739ac7 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) /* create a fragment for each channel */ bits = B; - while (nfree > 0 && len > 0) { + while (len > 0) { list = list->next; if (list == &ppp->channels) { i = 0; @@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) *otherwise divide it according to the speed *of the channel we are going to transmit on */ - if (pch->speed == 0) { - flen = totlen/nfree ; - if (nbigger > 0) { - flen++; - nbigger--; - } - } else { - flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / - ((totspeed*totfree)/pch->speed)) - hdrlen; - if (nbigger > 0) { - flen += ((totfree - nzero)*pch->speed)/totspeed; - nbigger -= ((totfree - nzero)*pch->speed)/ + if (nfree > 0) { + if (pch->speed == 0) { + flen = totlen/nfree ; + if (nbigger > 0) { + flen++; + nbigger--; + } + } else { + flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / + ((totspeed*totfree)/pch->speed)) - hdrlen; + if (nbigger > 0) { + flen += ((totfree - nzero)*pch->speed)/totspeed; + nbigger -= ((totfree - nzero)*pch->speed)/ totspeed; + } } + nfree--; } - nfree--; /* *check if we are on the last channel or *we exceded the lenght of the data to *fragment */ - if ((nfree == 0) || (flen > len)) + if ((nfree <= 0) || (flen > len)) flen = len; /* *it is not worth to tx on slow channels: @@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) continue; } - mtu = pch->chan->mtu + 2 - hdrlen; + mtu = pch->chan->mtu - hdrlen; if (mtu < 4) mtu = 4; if (flen > mtu) diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index aa3d39f38e2..d2fa2db1358 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -397,6 +397,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); sp_put(ap); + tty_unthrottle(tty); } static void diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index f0031f1f97e..5f2090233d7 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) else { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); + po = NULL; while (++hash < PPPOE_HASH_SIZE) { po = pn->hash_table[hash]; if (po) diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index e7935d09c89..e0f9219a0ae 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -2680,6 +2680,7 @@ out_unregister_pppol2tp_proto: static void __exit pppol2tp_exit(void) { unregister_pppox_proto(PX_PROTO_OL2TP); + unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops); proto_unregister(&pppol2tp_sk_proto); } diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index d1a5fb4d6ac..a3932c9f340 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -1411,6 +1411,7 @@ static const struct net_device_ops gelic_netdevice_ops = { .ndo_set_multicast_list = 
gelic_net_set_multi, .ndo_change_mtu = gelic_net_change_mtu, .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gelic_net_poll_controller, diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index b6b3ca9bdb2..6932b08d746 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2707,6 +2707,7 @@ static const struct net_device_ops gelic_wl_netdevice_ops = { .ndo_set_multicast_list = gelic_net_set_multi, .ndo_change_mtu = gelic_net_change_mtu, .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gelic_net_poll_controller, diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 156e02e8905..6ed5317ab1c 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -1607,6 +1607,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev); int ql_cam_route_initialize(struct ql_adapter *qdev); int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); int ql_mb_about_fw(struct ql_adapter *qdev); +void ql_link_on(struct ql_adapter *qdev); +void ql_link_off(struct ql_adapter *qdev); #if 1 #define QL_ALL_DUMP diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 37c99fe7977..eb6a9ee640e 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c @@ -59,7 +59,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(cqicb), + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, rx_ring->cq_id); if (status) { QPRINTK(qdev, IFUP, ERR, @@ -82,7 +82,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(cqicb), + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, rx_ring->cq_id); if (status) { QPRINTK(qdev, IFUP, ERR, diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 90d1f76c0e8..5768af17f16 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, return -ENOMEM; } + status = ql_sem_spinlock(qdev, SEM_ICB_MASK); + if (status) + return status; + status = ql_wait_cfg(qdev, bit); if (status) { QPRINTK(qdev, IFUP, ERR, @@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, goto exit; } - status = ql_sem_spinlock(qdev, SEM_ICB_MASK); - if (status) - goto exit; ql_write32(qdev, ICB_L, (u32) map); ql_write32(qdev, ICB_H, (u32) (map >> 32)); - ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ mask = CFG_Q_MASK | (bit << 16); value = bit | (q_id << CFG_Q_SHIFT); @@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, */ status = ql_wait_cfg(qdev, bit); exit: + ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ pci_unmap_single(qdev->pdev, map, size, direction); return status; } @@ -412,6 +413,57 @@ exit: return status; } +/* Set or clear MAC address in hardware. We sometimes + * have to clear it to prevent wrong frame routing + * especially in a bonding environment. 
+ */ +static int ql_set_mac_addr(struct ql_adapter *qdev, int set) +{ + int status; + char zero_mac_addr[ETH_ALEN]; + char *addr; + + if (set) { + addr = &qdev->ndev->dev_addr[0]; + QPRINTK(qdev, IFUP, DEBUG, + "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n", + addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5]); + } else { + memset(zero_mac_addr, 0, ETH_ALEN); + addr = &zero_mac_addr[0]; + QPRINTK(qdev, IFUP, DEBUG, + "Clearing MAC address on %s\n", + qdev->ndev->name); + } + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + status = ql_set_mac_addr_reg(qdev, (u8 *) addr, + MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + QPRINTK(qdev, IFUP, ERR, "Failed to init mac " + "address.\n"); + return status; +} + +void ql_link_on(struct ql_adapter *qdev) +{ + QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n", + qdev->ndev->name); + netif_carrier_on(qdev->ndev); + ql_set_mac_addr(qdev, 1); +} + +void ql_link_off(struct ql_adapter *qdev) +{ + QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n", + qdev->ndev->name); + netif_carrier_off(qdev->ndev); + ql_set_mac_addr(qdev, 0); +} + /* Get a specific frame routing value from the CAM. * Used for debug and reg dump. */ @@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev, tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; tx_ring_desc = &tx_ring->q[mac_rsp->tid]; ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); - qdev->stats.tx_bytes += tx_ring_desc->map_cnt; + qdev->stats.tx_bytes += (tx_ring_desc->skb)->len; qdev->stats.tx_packets++; dev_kfree_skb(tx_ring_desc->skb); tx_ring_desc->skb = NULL; @@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev, /* Fire up a handler to reset the MPI processor. 
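ql_link_on()/ql_link_off() above tie carrier changes to setting or clearing the MAC address in the CAM so a downed port stops attracting frames, which matters in bonding setups. A stripped-down sketch of that pairing, with write_cam_mac() standing in for ql_set_mac_addr_reg() and its semaphore handling:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int write_cam_mac(struct net_device *ndev, const u8 *addr)
{
	/* placeholder for ql_set_mac_addr_reg() plus its hardware semaphore */
	return 0;
}

static void port_link_change(struct net_device *ndev, int up)
{
	static const u8 zero_mac[ETH_ALEN];	/* all zeroes */

	if (up) {
		netif_carrier_on(ndev);
		write_cam_mac(ndev, ndev->dev_addr);	/* claim the address */
	} else {
		netif_carrier_off(ndev);
		write_cam_mac(ndev, zero_mac);	/* stop frame steering to us */
	}
}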
*/ void ql_queue_fw_error(struct ql_adapter *qdev) { - netif_carrier_off(qdev->ndev); + ql_link_off(qdev); queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); } void ql_queue_asic_error(struct ql_adapter *qdev) { - netif_carrier_off(qdev->ndev); + ql_link_off(qdev); ql_disable_interrupts(qdev); /* Clear adapter up bit to signal the recovery * process that it shouldn't kill the reset worker @@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev) } tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; mac_iocb_ptr = tx_ring_desc->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr)); + memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; mac_iocb_ptr->tid = tx_ring_desc->index; @@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) ql_init_tx_ring(qdev, tx_ring); - err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ, + err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, (u16) tx_ring->wq_id); if (err) { QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); @@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev) int i; u8 *hash_id = (u8 *) ricb->hash_cq_id; - memset((void *)ricb, 0, sizeof(ricb)); + memset((void *)ricb, 0, sizeof(*ricb)); ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K; ricb->flags = @@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev) QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); - status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0); + status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); if (status) { QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); return status; @@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev) return status; } -/* Initialize the frame-to-queue routing. */ -static int ql_route_initialize(struct ql_adapter *qdev) +static int ql_clear_routing_entries(struct ql_adapter *qdev) { - int status = 0; - int i; + int i, status = 0; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; - /* Clear all the entries in the routing table. */ for (i = 0; i < 16; i++) { status = ql_set_routing_reg(qdev, i, 0, 0); if (status) { QPRINTK(qdev, IFUP, ERR, - "Failed to init routing register for CAM packets.\n"); - goto exit; + "Failed to init routing register for CAM " + "packets.\n"); + break; } } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Initialize the frame-to-queue routing. */ +static int ql_route_initialize(struct ql_adapter *qdev) +{ + int status = 0; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) + goto exit; status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); if (status) { @@ -3096,14 +3163,15 @@ exit: int ql_cam_route_initialize(struct ql_adapter *qdev) { - int status; + int status, set; - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + /* If check if the link is up and use to + * determine if we are setting or clearing + * the MAC address in the CAM. 
+ */ + set = ql_read32(qdev, STS); + set &= qdev->port_link_up; + status = ql_set_mac_addr(qdev, set); if (status) { QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); return status; @@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev) { u32 value; int status = 0; - unsigned long end_jiffies = jiffies + - max((unsigned long)1, usecs_to_jiffies(30)); + unsigned long end_jiffies; + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) { + QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n"); + return status; + } + + end_jiffies = jiffies + + max((unsigned long)1, usecs_to_jiffies(30)); ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); do { @@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev) int i, status = 0; struct rx_ring *rx_ring; - netif_carrier_off(qdev->ndev); + ql_link_off(qdev); /* Don't kill the reset worker thread if we * are in the process of recovery. @@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev) } set_bit(QL_ADAPTER_UP, &qdev->flags); ql_alloc_rx_buffers(qdev); - if ((ql_read32(qdev, STS) & qdev->port_init)) - netif_carrier_on(qdev->ndev); + /* If the port is initialized and the + * link is up the turn on the carrier. + */ + if ((ql_read32(qdev, STS) & qdev->port_init) && + (ql_read32(qdev, STS) & qdev->port_link_up)) + ql_link_on(qdev); ql_enable_interrupts(qdev); ql_enable_all_completion_interrupts(qdev); netif_tx_start_all_queues(qdev->ndev); @@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev) return -ENOMEM; } status = ql_request_irq(qdev); - if (status) - goto err_irq; - return status; -err_irq: - ql_free_mem_resources(qdev); return status; } @@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev) for (i = 0; i < qdev->tx_ring_count; i++) { tx_ring = &qdev->tx_ring[i]; - memset((void *)tx_ring, 0, sizeof(tx_ring)); + memset((void *)tx_ring, 0, sizeof(*tx_ring)); tx_ring->qdev = qdev; tx_ring->wq_id = i; tx_ring->wq_len = qdev->tx_ring_size; @@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev) for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; - memset((void *)rx_ring, 0, sizeof(rx_ring)); + memset((void *)rx_ring, 0, sizeof(*rx_ring)); rx_ring->qdev = qdev; rx_ring->cq_id = i; rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. 
*/ @@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, int pos, err = 0; u16 val16; - memset((void *)qdev, 0, sizeof(qdev)); + memset((void *)qdev, 0, sizeof(*qdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "PCI device enable failed.\n"); @@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev, pci_disable_device(pdev); return err; } - netif_carrier_off(ndev); + ql_link_off(qdev); ql_display_dev_info(ndev); cards_found++; return 0; diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 71afbf8b9c5..6685bd97da9 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -238,7 +238,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) &qdev->mpi_port_cfg_work, 0); } - netif_carrier_on(qdev->ndev); + ql_link_on(qdev); } static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) @@ -251,7 +251,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) if (status) QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n"); - netif_carrier_off(qdev->ndev); + ql_link_off(qdev); } static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) @@ -849,7 +849,7 @@ void ql_mpi_idc_work(struct work_struct *work) case MB_CMD_PORT_RESET: case MB_CMD_SET_PORT_CFG: case MB_CMD_STOP_FW: - netif_carrier_off(qdev->ndev); + ql_link_off(qdev); /* Signal the resulting link up AEN * that the frame routing and mac addr * needs to be set. diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index ed63d23a645..961b5397a53 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -49,8 +49,8 @@ #include <asm/processor.h> #define DRV_NAME "r6040" -#define DRV_VERSION "0.23" -#define DRV_RELDATE "05May2009" +#define DRV_VERSION "0.24" +#define DRV_RELDATE "08Jul2009" /* PHY CHIP Address */ #define PHY1_ADDR 1 /* For MAC1 */ @@ -704,8 +704,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) /* Read MISR status and clear */ status = ioread16(ioaddr + MISR); - if (status == 0x0000 || status == 0xffff) + if (status == 0x0000 || status == 0xffff) { + /* Restore RDC MAC interrupt */ + iowrite16(misr, ioaddr + MIER); return IRQ_NONE; + } /* RX interrupt request */ if (status & RX_INTS) { diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4b53b58d75f..b82780d805f 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2060,8 +2060,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } - pci_set_master(pdev); - /* ioremap MMIO region */ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); if (!ioaddr) { @@ -2089,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) RTL_W16(IntrStatus, 0xffff); + pci_set_master(pdev); + /* Identify chip attached to board */ rtl8169_get_mac_version(tp, ioaddr); @@ -3874,6 +3874,15 @@ static void rtl_shutdown(struct pci_dev *pdev) spin_unlock_irq(&tp->lock); if (system_state == SYSTEM_POWER_OFF) { + /* WoL fails with some 8168 when the receiver is disabled. 
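Several qlge hunks above replace sizeof(ptr) with sizeof(*ptr) in memset() and ql_write_cfg() calls: the old code passed the size of the pointer (4 or 8 bytes) instead of the size of the structure, so most of each control block was never cleared or written. A user-space demonstration of the difference (struct wqicb here is a stand-in, not the driver's real layout):

#include <stdio.h>
#include <string.h>

struct wqicb {				/* stand-in config block */
	unsigned char data[128];
};

int main(void)
{
	struct wqicb block;
	struct wqicb *wqicb = &block;

	printf("sizeof(wqicb)  = %zu  (size of the pointer)\n",
	       sizeof(wqicb));
	printf("sizeof(*wqicb) = %zu  (size of the structure)\n",
	       sizeof(*wqicb));

	memset(wqicb, 0, sizeof(*wqicb));	/* clears all 128 bytes */
	return 0;
}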
*/ + if (tp->features & RTL_FEATURE_WOL) { + pci_clear_master(pdev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + } + pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c index 5345e47b35a..4525cbe8dd6 100644 --- a/drivers/net/s6gmac.c +++ b/drivers/net/s6gmac.c @@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev) struct s6gmac *pd = netdev_priv(dev); int i = 0; struct phy_device *p = NULL; - while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) + while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) i++; p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, PHY_INTERFACE_MODE_RGMII); diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 18821f217e1..e3156c97bb5 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -1593,6 +1593,7 @@ out: static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, + { PCI_DEVICE(0x1088, 0x2031) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 341882f959f..a2d82ddb3b4 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - u32 ioaddr, boguscnt = RX_RING_SIZE; - u32 intr_status = 0; + u32 ioaddr, intr_status = 0; ioaddr = ndev->base_addr; spin_lock(&mdp->lock); @@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) if (intr_status & cd->eesr_err_check) sh_eth_error(ndev, intr_status); - if (--boguscnt < 0) { - printk(KERN_WARNING - "%s: Too much work at interrupt, status=0x%4.4x.\n", - ndev->name, intr_status); - } - other_irq: spin_unlock(&mdp->lock); diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 60d502eef4f..543af2044f4 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3854,8 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, skge->speed = -1; skge->advertising = skge_supported_modes(hw); - if (device_may_wakeup(&hw->pdev->dev)) + if (device_can_wakeup(&hw->pdev->dev)) { skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } hw->dev[port] = dev; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7681d28c53d..0a551d8f5d9 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1151,14 +1151,7 @@ stopped: /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); - - /* Reset the RAM Buffer receive queue */ - sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET); - - /* Reset Rx MAC FIFO */ - sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET); - - sky2_read8(hw, B0_CTST); + mmiowb(); } /* Clean out receive buffer area, assumes receiver hardware stopped */ @@ -1495,6 +1488,8 @@ static int sky2_up(struct net_device *dev) sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); #endif + sky2->restarting = 0; + err = sky2_rx_start(sky2); if (err) goto err_out; @@ -1507,6 +1502,9 @@ static int sky2_up(struct net_device *dev) sky2_set_multicast(dev); + /* wake queue incase we are restarting */ + netif_wake_queue(dev); + if (netif_msg_ifup(sky2)) printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); return 0; @@ -1540,6 
+1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head) /* Number of list elements available for next tx */ static inline int tx_avail(const struct sky2_port *sky2) { + if (unlikely(sky2->restarting)) + return 0; return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); } @@ -1825,11 +1825,9 @@ static int sky2_down(struct net_device *dev) if (netif_msg_ifdown(sky2)) printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); - /* Disable port IRQ */ - imask = sky2_read32(hw, B0_IMSK); - imask &= ~portirq_msk[port]; - sky2_write32(hw, B0_IMSK, imask); - sky2_read32(hw, B0_IMSK); + /* explicitly shut off tx incase we're restarting */ + sky2->restarting = 1; + netif_tx_disable(dev); /* Force flow control off */ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); @@ -1870,8 +1868,6 @@ static int sky2_down(struct net_device *dev) sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); - sky2_rx_stop(sky2); - sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); @@ -1881,6 +1877,14 @@ static int sky2_down(struct net_device *dev) sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); sky2_read8(hw, STAT_ISR_TIMER_CTRL); + sky2_rx_stop(sky2); + + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~portirq_msk[port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + synchronize_irq(hw->pdev->irq); napi_synchronize(&hw->napi); @@ -2366,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) { struct sky2_port *sky2 = netdev_priv(dev); - if (netif_running(dev)) { + if (likely(netif_running(dev) && !sky2->restarting)) { netif_tx_lock(dev); sky2_tx_complete(sky2, last); netif_tx_unlock(dev); @@ -2495,7 +2499,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) if (likely(status >> 16 == (status & 0xffff))) { skb = sky2->rx_ring[sky2->rx_next].skb; skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = status & 0xffff; + skb->csum = le16_to_cpu(status); } else { printk(KERN_NOTICE PFX "%s: hardware receive " "checksum problem (status = %#x)\n", @@ -4290,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; sky2->rx_pending = RX_DEF_PENDING; + sky2->restarting = 0; hw->dev[port] = dev; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index b5549c9e510..4486b066b43 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2051,6 +2051,7 @@ struct sky2_port { u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ u8 rx_csum; u8 wol; + u8 restarting; enum flow_control flow_mode; enum flow_control flow_status; diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index fdcbaf8dfa7..7567f510eff 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length) /* this enables an interrupt in the interrupt mask register */ #define SMC_ENABLE_INT(lp, x) do { \ unsigned char mask; \ - spin_lock_irq(&lp->lock); \ + unsigned long smc_enable_flags; \ + spin_lock_irqsave(&lp->lock, smc_enable_flags); \ mask = SMC_GET_INT_MASK(lp); \ mask |= (x); \ SMC_SET_INT_MASK(lp, mask); \ - spin_unlock_irq(&lp->lock); \ + spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ } while (0) /* this disables an interrupt from the interrupt mask register */ #define SMC_DISABLE_INT(lp, x) do { \ unsigned char mask; \ - spin_lock_irq(&lp->lock); \ + unsigned long smc_disable_flags; \ + spin_lock_irqsave(&lp->lock, 
smc_disable_flags); \ mask = SMC_GET_INT_MASK(lp); \ mask &= ~(x); \ SMC_SET_INT_MASK(lp, mask); \ - spin_unlock_irq(&lp->lock); \ + spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ } while (0) /* @@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev) * any other concurrent access and C would always interrupt B. But life * isn't that easy in a SMP world... */ -#define smc_special_trylock(lock) \ +#define smc_special_trylock(lock, flags) \ ({ \ int __ret; \ - local_irq_disable(); \ + local_irq_save(flags); \ __ret = spin_trylock(lock); \ if (!__ret) \ - local_irq_enable(); \ + local_irq_restore(flags); \ __ret; \ }) -#define smc_special_lock(lock) spin_lock_irq(lock) -#define smc_special_unlock(lock) spin_unlock_irq(lock) +#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) +#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) #else -#define smc_special_trylock(lock) (1) -#define smc_special_lock(lock) do { } while (0) -#define smc_special_unlock(lock) do { } while (0) +#define smc_special_trylock(lock, flags) (1) +#define smc_special_lock(lock, flags) do { } while (0) +#define smc_special_unlock(lock, flags) do { } while (0) #endif /* @@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data) struct sk_buff *skb; unsigned int packet_no, len; unsigned char *buf; + unsigned long flags; DBG(3, "%s: %s\n", dev->name, __func__); - if (!smc_special_trylock(&lp->lock)) { + if (!smc_special_trylock(&lp->lock, flags)) { netif_stop_queue(dev); tasklet_schedule(&lp->tx_task); return; @@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data) skb = lp->pending_tx_skb; if (unlikely(!skb)) { - smc_special_unlock(&lp->lock); + smc_special_unlock(&lp->lock, flags); return; } lp->pending_tx_skb = NULL; @@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data) printk("%s: Memory allocation failed.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_fifo_errors++; - smc_special_unlock(&lp->lock); + smc_special_unlock(&lp->lock, flags); goto done; } @@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data) /* queue the packet for TX */ SMC_SET_MMU_CMD(lp, MC_ENQUEUE); - smc_special_unlock(&lp->lock); + smc_special_unlock(&lp->lock, flags); dev->trans_start = jiffies; dev->stats.tx_packets++; @@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; unsigned int numPages, poll_count, status; + unsigned long flags; DBG(3, "%s: %s\n", dev->name, __func__); @@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } - smc_special_lock(&lp->lock); + smc_special_lock(&lp->lock, flags); /* now, try to allocate the memory */ SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); @@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } while (--poll_count); - smc_special_unlock(&lp->lock); + smc_special_unlock(&lp->lock, flags); lp->pending_tx_skb = skb; if (!poll_count) { @@ -1774,6 +1778,7 @@ static const struct net_device_ops smc_netdev_ops = { .ndo_start_xmit = smc_hard_start_xmit, .ndo_tx_timeout = smc_timeout, .ndo_set_multicast_list = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index f1f773b17fe..57a159fac99 100644 
--- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -186,7 +186,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) #define SMC_IRQ_FLAGS (-1) /* from resource */ -#elif defined(CONFIG_MACH_LOGICPD_PXA270) +#elif defined(CONFIG_MACH_LOGICPD_PXA270) \ + || defined(CONFIG_MACH_NOMADIK_8815NHK) #define SMC_CAN_USE_8BIT 0 #define SMC_CAN_USE_16BIT 1 diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b60639bd181..94b6d2658dd 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1779,6 +1779,7 @@ static const struct net_device_ops smsc911x_netdev_ops = { .ndo_get_stats = smsc911x_get_stats, .ndo_set_multicast_list = smsc911x_set_multicast_list, .ndo_do_ioctl = smsc911x_do_ioctl, + .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = smsc911x_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1938,7 +1939,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev) if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, res->end - res->start); + release_mem_region(res->start, resource_size(res)); iounmap(pdata->ioaddr); @@ -1976,7 +1977,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) retval = -ENODEV; goto out_0; } - res_size = res->end - res->start + 1; + res_size = resource_size(res); irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { @@ -2104,7 +2105,7 @@ out_unmap_io_3: out_free_netdev_2: free_netdev(dev); out_release_io_1: - release_mem_region(res->start, res->end - res->start); + release_mem_region(res->start, resource_size(res)); out_0: return retval; } diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 838cce8b8ff..669253c7bd4 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -180,7 +180,7 @@ static int full_duplex[MAX_UNITS] = {0, }; /* These identify the driver base version and may not be removed. */ static const char version[] __devinitconst = KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n" -KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; +" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver"); diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 545f81b34ad..d1521c3875b 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -1698,13 +1698,13 @@ static int netdev_close(struct net_device *dev) #ifdef __i386__ if (netif_msg_hw(np)) { - printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", + printk(KERN_DEBUG " Tx ring at %8.8x:\n", (int)(np->tx_ring_dma)); for (i = 0; i < TX_RING_SIZE; i++) - printk(" #%d desc. %4.4x %8.8x %8.8x.\n", + printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, np->tx_ring[i].frag[0].length); - printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", + printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)(np->rx_ring_dma)); for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { printk(KERN_DEBUG " #%d desc. 
%4.4x %4.4x %8.8x\n", diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index a82fb2aca4c..f1e5e4542c2 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -1016,7 +1016,9 @@ static const struct net_device_ops vnet_ops = { .ndo_open = vnet_open, .ndo_stop = vnet_close, .ndo_set_multicast_list = vnet_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = vnet_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = vnet_tx_timeout, .ndo_change_mtu = vnet_change_mtu, .ndo_start_xmit = vnet_start_xmit, diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9d896116cf7..08a6c41c159 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -1912,7 +1912,7 @@ static int __init ibmtr_init(void) find_turbo_adapters(io); - for (i = 0; io[i] && (i < IBMTR_MAX_ADAPTERS); i++) { + for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) { struct net_device *dev; irq[i] = 0; mem[i] = 0; diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 0f78f99f9b2..7030bd5e984 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -1132,7 +1132,9 @@ static int tsi108_get_mac(struct net_device *dev) } if (!is_valid_ether_addr(dev->dev_addr)) { - printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2); + printk(KERN_ERR + "%s: Invalid MAC address. word1: %08x, word2: %08x\n", + dev->name, word1, word2); return -EINVAL; } @@ -1201,8 +1203,8 @@ static void tsi108_set_rx_mode(struct net_device *dev) __set_bit(hash, &data->mc_hash[0]); } else { printk(KERN_ERR - "%s: got multicast address of length %d " - "instead of 6.\n", dev->name, + "%s: got multicast address of length %d instead of 6.\n", + dev->name, mc->dmi_addrlen); } diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 81f054dbb88..ef49744a508 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -944,9 +944,10 @@ static void de_set_media (struct de_private *de) macmode &= ~FullDuplex; if (netif_msg_link(de)) { - printk(KERN_INFO "%s: set link %s\n" - KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" - KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", + printk(KERN_INFO + "%s: set link %s\n" + "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" + "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", de->dev->name, media_name[media], de->dev->name, dr32(MacMode), dr32(SIAStatus), dr32(CSR13), dr32(CSR14), dr32(CSR15), diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index eb72d2e9ab3..acfdccd4456 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev) if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ for (j=0; j<limit; j++) { /* Search PHY table */ if (id != phy_info[j].id) continue; /* ID match? 
*/ - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); if (k < DE4X5_MAX_PHY) { memcpy((char *)&lp->phy[k], (char *)&phy_info[j], sizeof(struct phy_table)); @@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev) break; } if ((j == limit) && (i < DE4X5_MAX_MII)) { - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); lp->phy[k].addr = i; lp->phy[k].id = id; lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ @@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev) purgatory: lp->active = 0; if (lp->phy[0].id) { /* Reset the PHY devices */ - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 2abb5d3becc..4cf9a658875 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -570,16 +570,18 @@ static void tulip_tx_timeout(struct net_device *dev) (unsigned int)tp->rx_ring[i].buffer2, buf[0], buf[1], buf[2]); for (j = 0; buf[j] != 0xee && j < 1600; j++) - if (j < 100) printk(" %2.2x", buf[j]); - printk(" j=%d.\n", j); + if (j < 100) + printk(KERN_CONT " %2.2x", buf[j]); + printk(KERN_CONT " j=%d.\n", j); } printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) - printk(" %8.8x", (unsigned int)tp->rx_ring[i].status); - printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring); + printk(KERN_CONT " %8.8x", + (unsigned int)tp->rx_ring[i].status); + printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) - printk(" %8.8x", (unsigned int)tp->tx_ring[i].status); - printk("\n"); + printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status); + printk(KERN_CONT "\n"); } #endif @@ -650,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) int entry; u32 flag; dma_addr_t mapping; + unsigned long flags; - spin_lock_irq(&tp->lock); + spin_lock_irqsave(&tp->lock, flags); /* Calculate the next Tx descriptor entry. */ entry = tp->cur_tx % TX_RING_SIZE; @@ -686,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Trigger an immediate transmit demand. 
*/ iowrite32(0, tp->base_addr + CSR1); - spin_unlock_irq(&tp->lock); + spin_unlock_irqrestore(&tp->lock, flags); dev->trans_start = jiffies; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 842b1a2c40d..0f15773dae5 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -142,7 +142,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static const char version[] __initconst = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" - KERN_INFO " http://www.scyld.com/network/drivers.html\n"; + " http://www.scyld.com/network/drivers.html\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); @@ -939,7 +939,7 @@ static void tx_timeout(struct net_device *dev) printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) printk(" %8.8x", (unsigned int)np->rx_ring[i].status); - printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring); + printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) printk(" %8.8x", np->tx_ring[i].status); printk("\n"); @@ -1520,7 +1520,7 @@ static int netdev_close(struct net_device *dev) printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n", i, np->tx_ring[i].length, np->tx_ring[i].status, np->tx_ring[i].buffer1); - printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", + printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 11a0ba47b67..42b6c6319bc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = __tun_get(tfile); - struct sock *sk = tun->sk; + struct sock *sk; unsigned int mask = 0; if (!tun) return POLLERR; + sk = tun->sk; + DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); poll_wait(file, &tun->socket.wait, wait); @@ -1046,20 +1048,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) return err; } -static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) +static int tun_get_iff(struct net *net, struct tun_struct *tun, + struct ifreq *ifr) { - struct tun_struct *tun = tun_get(file); - - if (!tun) - return -EBADFD; - DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); strcpy(ifr->ifr_name, tun->dev->name); ifr->ifr_flags = tun_flags(tun); - tun_put(tun); return 0; } @@ -1103,8 +1100,8 @@ static int set_offload(struct net_device *dev, unsigned long arg) return 0; } -static int tun_chr_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long tun_chr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; @@ -1126,34 +1123,32 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, (unsigned int __user*)argp); } + rtnl_lock(); + tun = __tun_get(tfile); if (cmd == TUNSETIFF && !tun) { - int err; - ifr.ifr_name[IFNAMSIZ-1] = '\0'; - rtnl_lock(); - err = tun_set_iff(tfile->net, file, &ifr); - rtnl_unlock(); + ret = tun_set_iff(tfile->net, file, &ifr); - if (err) - return err; + if (ret) + goto unlock; if (copy_to_user(argp, &ifr, sizeof(ifr))) - return -EFAULT; - return 0; + ret = -EFAULT; + goto unlock; } - + ret = -EBADFD; if (!tun) - return -EBADFD; + goto 
unlock; DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); ret = 0; switch (cmd) { case TUNGETIFF: - ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); + ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); if (ret) break; @@ -1199,7 +1194,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, case TUNSETLINK: /* Only allow setting the type when the interface is down */ - rtnl_lock(); if (tun->dev->flags & IFF_UP) { DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", tun->dev->name); @@ -1209,7 +1203,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); ret = 0; } - rtnl_unlock(); break; #ifdef TUN_DEBUG @@ -1218,9 +1211,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, break; #endif case TUNSETOFFLOAD: - rtnl_lock(); ret = set_offload(tun->dev, arg); - rtnl_unlock(); break; case TUNSETTXFILTER: @@ -1228,9 +1219,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) break; - rtnl_lock(); ret = update_filter(&tun->txflt, (void __user *)arg); - rtnl_unlock(); break; case SIOCGIFHWADDR: @@ -1246,9 +1235,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, DBG(KERN_DEBUG "%s: set hw address: %pM\n", tun->dev->name, ifr.ifr_hwaddr.sa_data); - rtnl_lock(); ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); - rtnl_unlock(); break; case TUNGETSNDBUF: @@ -1271,7 +1258,10 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, break; }; - tun_put(tun); +unlock: + rtnl_unlock(); + if (tun) + tun_put(tun); return ret; } @@ -1324,20 +1314,22 @@ static int tun_chr_close(struct inode *inode, struct file *file) struct tun_file *tfile = file->private_data; struct tun_struct *tun; - - rtnl_lock(); tun = __tun_get(tfile); if (tun) { - DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); + struct net_device *dev = tun->dev; + + DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); __tun_detach(tun); /* If desireable, unregister the netdevice. 
*/ - if (!(tun->flags & TUN_PERSIST)) - unregister_netdevice(tun->dev); - + if (!(tun->flags & TUN_PERSIST)) { + rtnl_lock(); + if (dev->reg_state == NETREG_REGISTERED) + unregister_netdevice(dev); + rtnl_unlock(); + } } - rtnl_unlock(); tun = tfile->tun; if (tun) @@ -1357,7 +1349,7 @@ static const struct file_operations tun_fops = { .write = do_sync_write, .aio_write = tun_chr_aio_write, .poll = tun_chr_poll, - .ioctl = tun_chr_ioctl, + .unlocked_ioctl = tun_chr_ioctl, .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 40c6eba775c..8a7b8c7bd78 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1590,13 +1590,13 @@ static int init_phy(struct net_device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - if (!ug_info->phy_node) - return 0; - phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, priv->phy_interface); + if (!phydev) + phydev = of_phy_connect_fixed_link(dev, &adjust_link, + priv->phy_interface); if (!phydev) { - printk("%s: Could not attach to PHY\n", dev->name); + dev_err(&dev->dev, "Could not attach to PHY\n"); return -ENODEV; } @@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) u8 __iomem *bd; /* BD pointer */ u32 bd_status; u8 txQ = 0; + unsigned long flags; ugeth_vdbg("%s: IN", __func__); - spin_lock_irq(&ugeth->lock); + spin_lock_irqsave(&ugeth->lock, flags); dev->stats.tx_bytes += skb->len; @@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) uccf = ugeth->uccf; out_be16(uccf->p_utodr, UCC_FAST_TOD); #endif - spin_unlock_irq(&ugeth->lock); + spin_unlock_irqrestore(&ugeth->lock, flags); return 0; } @@ -3608,9 +3609,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; struct resource res; - struct device_node *phy; int err, ucc_num, max_speed = 0; - const u32 *fixed_link; const unsigned int *prop; const char *sprop; const void *mac_addr; @@ -3708,15 +3707,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); - fixed_link = of_get_property(np, "fixed-link", NULL); - if (fixed_link) { - phy = NULL; - } else { - phy = of_parse_phandle(np, "phy-handle", 0); - if (phy == NULL) - return -ENODEV; - } - ug_info->phy_node = phy; + + ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Find the TBI PHY node. If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); @@ -3725,7 +3717,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma prop = of_get_property(np, "phy-connection-type", NULL); if (!prop) { /* handle interface property present in old trees */ - prop = of_get_property(phy, "interface", NULL); + prop = of_get_property(ug_info->phy_node, "interface", NULL); if (prop != NULL) { phy_interface = enet_to_phy_interface[*prop]; max_speed = enet_to_speed[*prop]; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index a906d399813..c47237c2d63 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -369,4 +369,12 @@ config USB_NET_INT51X1 (Powerline Communications) solution with an Intellon INT51x1/INT5200 chip, like the "devolo dLan duo". 
+config USB_CDC_PHONET + tristate "CDC Phonet support" + depends on PHONET + help + Choose this option to support the Phonet interface to a Nokia + cellular modem, as found on most Nokia handsets with the + "PC suite" USB profile. + endmenu diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index b870b0b1cbe..e17afb78f37 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -21,4 +21,5 @@ obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o obj-$(CONFIG_USB_USBNET) += usbnet.o obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o +obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c new file mode 100644 index 00000000000..792af72da8a --- /dev/null +++ b/drivers/net/usb/cdc-phonet.c @@ -0,0 +1,461 @@ +/* + * phonet.c -- USB CDC Phonet host driver + * + * Copyright (C) 2008-2009 Nokia Corporation. All rights reserved. + * + * Author: Rémi Denis-Courmont + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/usb/cdc.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/if_phonet.h> + +#define PN_MEDIA_USB 0x1B + +static const unsigned rxq_size = 17; + +struct usbpn_dev { + struct net_device *dev; + + struct usb_interface *intf, *data_intf; + struct usb_device *usb; + unsigned int tx_pipe, rx_pipe; + u8 active_setting; + u8 disconnected; + + unsigned tx_queue; + spinlock_t tx_lock; + + spinlock_t rx_lock; + struct sk_buff *rx_skb; + struct urb *urbs[0]; +}; + +static void tx_complete(struct urb *req); +static void rx_complete(struct urb *req); + +/* + * Network device callbacks + */ +static int usbpn_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct usbpn_dev *pnd = netdev_priv(dev); + struct urb *req = NULL; + unsigned long flags; + int err; + + if (skb->protocol != htons(ETH_P_PHONET)) + goto drop; + + req = usb_alloc_urb(0, GFP_ATOMIC); + if (!req) + goto drop; + usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len, + tx_complete, skb); + req->transfer_flags = URB_ZERO_PACKET; + err = usb_submit_urb(req, GFP_ATOMIC); + if (err) { + usb_free_urb(req); + goto drop; + } + + spin_lock_irqsave(&pnd->tx_lock, flags); + pnd->tx_queue++; + if (pnd->tx_queue >= dev->tx_queue_len) + netif_stop_queue(dev); + spin_unlock_irqrestore(&pnd->tx_lock, flags); + return 0; + +drop: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return 0; +} + +static void tx_complete(struct urb *req) +{ + struct sk_buff *skb = req->context; + struct net_device *dev = skb->dev; + struct usbpn_dev *pnd = netdev_priv(dev); + + switch (req->status) { + case 0: + dev->stats.tx_bytes += skb->len; + break; + + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev->stats.tx_aborted_errors++; + default: + dev->stats.tx_errors++; + dev_dbg(&dev->dev, "TX error 
(%d)\n", req->status); + } + dev->stats.tx_packets++; + + spin_lock(&pnd->tx_lock); + pnd->tx_queue--; + netif_wake_queue(dev); + spin_unlock(&pnd->tx_lock); + + dev_kfree_skb_any(skb); + usb_free_urb(req); +} + +static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) +{ + struct net_device *dev = pnd->dev; + struct page *page; + int err; + + page = __netdev_alloc_page(dev, gfp_flags); + if (!page) + return -ENOMEM; + + usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page), + PAGE_SIZE, rx_complete, dev); + req->transfer_flags = 0; + err = usb_submit_urb(req, gfp_flags); + if (unlikely(err)) { + dev_dbg(&dev->dev, "RX submit error (%d)\n", err); + netdev_free_page(dev, page); + } + return err; +} + +static void rx_complete(struct urb *req) +{ + struct net_device *dev = req->context; + struct usbpn_dev *pnd = netdev_priv(dev); + struct page *page = virt_to_page(req->transfer_buffer); + struct sk_buff *skb; + unsigned long flags; + + switch (req->status) { + case 0: + spin_lock_irqsave(&pnd->rx_lock, flags); + skb = pnd->rx_skb; + if (!skb) { + skb = pnd->rx_skb = netdev_alloc_skb(dev, 12); + if (likely(skb)) { + /* Can't use pskb_pull() on page in IRQ */ + memcpy(skb_put(skb, 1), page_address(page), 1); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, 1, req->actual_length); + page = NULL; + } + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, 0, req->actual_length); + page = NULL; + } + if (req->actual_length < PAGE_SIZE) + pnd->rx_skb = NULL; /* Last fragment */ + else + skb = NULL; + spin_unlock_irqrestore(&pnd->rx_lock, flags); + if (skb) { + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + __skb_pull(skb, 1); + skb->dev = dev; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + netif_rx(skb); + } + goto resubmit; + + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + req = NULL; + break; + + case -EOVERFLOW: + dev->stats.rx_over_errors++; + dev_dbg(&dev->dev, "RX overflow\n"); + break; + + case -EILSEQ: + dev->stats.rx_crc_errors++; + break; + } + + dev->stats.rx_errors++; +resubmit: + if (page) + netdev_free_page(dev, page); + if (req) + rx_submit(pnd, req, GFP_ATOMIC); +} + +static int usbpn_close(struct net_device *dev); + +static int usbpn_open(struct net_device *dev) +{ + struct usbpn_dev *pnd = netdev_priv(dev); + int err; + unsigned i; + unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber; + + err = usb_set_interface(pnd->usb, num, pnd->active_setting); + if (err) + return err; + + for (i = 0; i < rxq_size; i++) { + struct urb *req = usb_alloc_urb(0, GFP_KERNEL); + + if (!req || rx_submit(pnd, req, GFP_KERNEL)) { + usbpn_close(dev); + return -ENOMEM; + } + pnd->urbs[i] = req; + } + + netif_wake_queue(dev); + return 0; +} + +static int usbpn_close(struct net_device *dev) +{ + struct usbpn_dev *pnd = netdev_priv(dev); + unsigned i; + unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber; + + netif_stop_queue(dev); + + for (i = 0; i < rxq_size; i++) { + struct urb *req = pnd->urbs[i]; + + if (!req) + continue; + usb_kill_urb(req); + usb_free_urb(req); + pnd->urbs[i] = NULL; + } + + return usb_set_interface(pnd->usb, num, !pnd->active_setting); +} + +static int usbpn_set_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU)) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops usbpn_ops = { + .ndo_open = usbpn_open, + .ndo_stop = usbpn_close, + .ndo_start_xmit = 
usbpn_xmit, + .ndo_change_mtu = usbpn_set_mtu, +}; + +static void usbpn_setup(struct net_device *dev) +{ + dev->features = 0; + dev->netdev_ops = &usbpn_ops, + dev->header_ops = &phonet_header_ops; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = PHONET_MAX_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_USB; + dev->addr_len = 1; + dev->tx_queue_len = 3; + + dev->destructor = free_netdev; +} + +/* + * USB driver callbacks + */ +static struct usb_device_id usbpn_ids[] = { + { + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_INT_CLASS + | USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x0421, /* Nokia */ + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = 0xFE, + }, + { }, +}; + +MODULE_DEVICE_TABLE(usb, usbpn_ids); + +static struct usb_driver usbpn_driver; + +int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + static const char ifname[] = "usbpn%d"; + const struct usb_cdc_union_desc *union_header = NULL; + const struct usb_cdc_header_desc *phonet_header = NULL; + const struct usb_host_interface *data_desc; + struct usb_interface *data_intf; + struct usb_device *usbdev = interface_to_usbdev(intf); + struct net_device *dev; + struct usbpn_dev *pnd; + u8 *data; + int len, err; + + data = intf->altsetting->extra; + len = intf->altsetting->extralen; + while (len >= 3) { + u8 dlen = data[0]; + if (dlen < 3) + return -EINVAL; + + /* bDescriptorType */ + if (data[1] == USB_DT_CS_INTERFACE) { + /* bDescriptorSubType */ + switch (data[2]) { + case USB_CDC_UNION_TYPE: + if (union_header || dlen < 5) + break; + union_header = + (struct usb_cdc_union_desc *)data; + break; + case 0xAB: + if (phonet_header || dlen < 5) + break; + phonet_header = + (struct usb_cdc_header_desc *)data; + break; + } + } + data += dlen; + len -= dlen; + } + + if (!union_header || !phonet_header) + return -EINVAL; + + data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); + if (data_intf == NULL) + return -ENODEV; + /* Data interface has one inactive and one active setting */ + if (data_intf->num_altsetting != 2) + return -EINVAL; + if (data_intf->altsetting[0].desc.bNumEndpoints == 0 + && data_intf->altsetting[1].desc.bNumEndpoints == 2) + data_desc = data_intf->altsetting + 1; + else + if (data_intf->altsetting[0].desc.bNumEndpoints == 2 + && data_intf->altsetting[1].desc.bNumEndpoints == 0) + data_desc = data_intf->altsetting; + else + return -EINVAL; + + dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size, + ifname, usbpn_setup); + if (!dev) + return -ENOMEM; + + pnd = netdev_priv(dev); + SET_NETDEV_DEV(dev, &intf->dev); + netif_stop_queue(dev); + + pnd->dev = dev; + pnd->usb = usb_get_dev(usbdev); + pnd->intf = intf; + pnd->data_intf = data_intf; + spin_lock_init(&pnd->tx_lock); + spin_lock_init(&pnd->rx_lock); + /* Endpoints */ + if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) { + pnd->rx_pipe = usb_rcvbulkpipe(usbdev, + data_desc->endpoint[0].desc.bEndpointAddress); + pnd->tx_pipe = usb_sndbulkpipe(usbdev, + data_desc->endpoint[1].desc.bEndpointAddress); + } else { + pnd->rx_pipe = usb_rcvbulkpipe(usbdev, + data_desc->endpoint[1].desc.bEndpointAddress); + pnd->tx_pipe = usb_sndbulkpipe(usbdev, + data_desc->endpoint[0].desc.bEndpointAddress); + } + pnd->active_setting = data_desc - data_intf->altsetting; + + err = usb_driver_claim_interface(&usbpn_driver, data_intf, pnd); + if (err) + goto out; + + /* Force inactive mode until the network device is brought UP */ + 
usb_set_interface(usbdev, union_header->bSlaveInterface0, + !pnd->active_setting); + usb_set_intfdata(intf, pnd); + + err = register_netdev(dev); + if (err) { + usb_driver_release_interface(&usbpn_driver, data_intf); + goto out; + } + + dev_dbg(&dev->dev, "USB CDC Phonet device found\n"); + return 0; + +out: + usb_set_intfdata(intf, NULL); + free_netdev(dev); + return err; +} + +static void usbpn_disconnect(struct usb_interface *intf) +{ + struct usbpn_dev *pnd = usb_get_intfdata(intf); + struct usb_device *usb = pnd->usb; + + if (pnd->disconnected) + return; + + pnd->disconnected = 1; + usb_driver_release_interface(&usbpn_driver, + (pnd->intf == intf) ? pnd->data_intf : pnd->intf); + unregister_netdev(pnd->dev); + usb_put_dev(usb); +} + +static struct usb_driver usbpn_driver = { + .name = "cdc_phonet", + .probe = usbpn_probe, + .disconnect = usbpn_disconnect, + .id_table = usbpn_ids, +}; + +static int __init usbpn_init(void) +{ + return usb_register(&usbpn_driver); +} + +static void __exit usbpn_exit(void) +{ + usb_deregister(&usbpn_driver); +} + +module_init(usbpn_init); +module_exit(usbpn_exit); + +MODULE_AUTHOR("Remi Denis-Courmont"); +MODULE_DESCRIPTION("USB CDC Phonet host interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 80e01778dd3..45cebfb302c 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -311,7 +311,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * bmCRC = 0 : CRC = 0xDEADBEEF */ if (header & BIT(14)) - crc2 = ~crc32_le(~0, skb2->data, len); + crc2 = ~crc32_le(~0, skb2->data, skb2->len); else crc2 = 0xdeadbeef; @@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return crc == crc2; if (unlikely(crc != crc2)) { - dev->stats.rx_errors++; + dev->net->stats.rx_errors++; dev_kfree_skb_any(skb2); } else usbnet_skb_return(dev, skb2); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 7ae82446b93..1d3730d6690 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) len = (skb->data[1] | (skb->data[2] << 8)) - 4; if (unlikely(status & 0xbf)) { - if (status & 0x01) dev->stats.rx_fifo_errors++; - if (status & 0x02) dev->stats.rx_crc_errors++; - if (status & 0x04) dev->stats.rx_frame_errors++; - if (status & 0x20) dev->stats.rx_missed_errors++; - if (status & 0x90) dev->stats.rx_length_errors++; + if (status & 0x01) dev->net->stats.rx_fifo_errors++; + if (status & 0x02) dev->net->stats.rx_crc_errors++; + if (status & 0x04) dev->net->stats.rx_frame_errors++; + if (status & 0x20) dev->net->stats.rx_missed_errors++; + if (status & 0x90) dev->net->stats.rx_length_errors++; return 0; } diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index e0131478971..1f9ec29fce5 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -999,6 +999,9 @@ static const struct net_device_ops kaweth_netdev_ops = { .ndo_tx_timeout = kaweth_tx_timeout, .ndo_set_multicast_list = kaweth_set_rx_mode, .ndo_get_stats = kaweth_netdev_stats, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; static int kaweth_probe( diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 034e8a73ca6..aeb1ab03a9e 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 
dbg("rx framesize %d range %d..%d mtu %d", skb->len, net->hard_header_len, dev->hard_mtu, net->mtu); #endif - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; nc_ensure_sync(dev); return 0; } @@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) hdr_len = le16_to_cpup(&header->hdr_len); packet_len = le16_to_cpup(&header->packet_len); if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; dbg("packet too big, %d", packet_len); nc_ensure_sync(dev); return 0; } else if (hdr_len < MIN_HEADER) { - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; dbg("header too short, %d", hdr_len); nc_ensure_sync(dev); return 0; @@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if ((packet_len & 0x01) == 0) { if (skb->data [packet_len] != PAD_BYTE) { - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; dbg("bad pad"); return 0; } skb_trim(skb, skb->len - 1); } if (skb->len != packet_len) { - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; dbg("bad packet len %d (expected %d)", skb->len, packet_len); nc_ensure_sync(dev); return 0; } if (header->packet_id != get_unaligned(&trailer->packet_id)) { - dev->stats.rx_fifo_errors++; + dev->net->stats.rx_fifo_errors++; dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", le16_to_cpu(header->packet_id), le16_to_cpu(trailer->packet_id)); diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 73acbd244aa..631d269ac98 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1493,6 +1493,9 @@ static const struct net_device_ops pegasus_netdev_ops = { .ndo_set_multicast_list = pegasus_set_multicast, .ndo_get_stats = pegasus_netdev_stats, .ndo_tx_timeout = pegasus_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; static struct usb_driver pegasus_driver = { diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index c7467823cd1..f968c834ff6 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h @@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904, DEFAULT_GPIO_RESET ) PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, DEFAULT_GPIO_RESET | PEGASUS_II ) +PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a, + DEFAULT_GPIO_RESET | PEGASUS_II ) PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, DEFAULT_GPIO_RESET) PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 1bf243ef950..2232232b798 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET || skb->len < msg_len || (data_offset + data_len + 8) > msg_len)) { - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", le32_to_cpu(hdr->msg_type), msg_len, data_offset, data_len, skb->len); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 89a91f8c22d..fe045896406 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (unlikely(header & RX_STS_ES_)) { if (netif_msg_rx_err(dev)) devdbg(dev, 
"Error header=0x%08x", header); - dev->stats.rx_errors++; - dev->stats.rx_dropped++; + dev->net->stats.rx_errors++; + dev->net->stats.rx_dropped++; if (header & RX_STS_CRC_) { - dev->stats.rx_crc_errors++; + dev->net->stats.rx_crc_errors++; } else { if (header & (RX_STS_TL_ | RX_STS_RF_)) - dev->stats.rx_frame_errors++; + dev->net->stats.rx_frame_errors++; if ((header & RX_STS_LE_) && (!(header & RX_STS_FT_))) - dev->stats.rx_length_errors++; + dev->net->stats.rx_length_errors++; } } else { /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 22c0585a031..edfd9e10ceb 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) int status; skb->protocol = eth_type_trans (skb, dev->net); - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->net->stats.rx_packets++; + dev->net->stats.rx_bytes += skb->len; if (netif_msg_rx_status (dev)) devdbg (dev, "< rx, len %zu, type 0x%x", @@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) if (netif_msg_rx_err (dev)) devdbg (dev, "drop"); error: - dev->stats.rx_errors++; + dev->net->stats.rx_errors++; skb_queue_tail (&dev->done, skb); } } @@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb) case 0: if (skb->len < dev->net->hard_header_len) { entry->state = rx_cleanup; - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; if (netif_msg_rx_err (dev)) devdbg (dev, "rx length %d", skb->len); } @@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb) * storm, recovering as needed. */ case -EPIPE: - dev->stats.rx_errors++; + dev->net->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); // FALLTHROUGH @@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb) case -EPROTO: case -ETIME: case -EILSEQ: - dev->stats.rx_errors++; + dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); if (netif_msg_link (dev)) @@ -465,12 +465,12 @@ block: /* data overrun ... flush fifo? 
*/ case -EOVERFLOW: - dev->stats.rx_over_errors++; + dev->net->stats.rx_over_errors++; // FALLTHROUGH default: entry->state = rx_cleanup; - dev->stats.rx_errors++; + dev->net->stats.rx_errors++; if (netif_msg_rx_err (dev)) devdbg (dev, "rx status %d", urb_status); break; @@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net) if (netif_msg_ifdown (dev)) devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", - dev->stats.rx_packets, dev->stats.tx_packets, - dev->stats.rx_errors, dev->stats.tx_errors + net->stats.rx_packets, net->stats.tx_packets, + net->stats.rx_errors, net->stats.tx_errors ); // ensure there are no more active urbs @@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb) struct usbnet *dev = entry->dev; if (urb->status == 0) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += entry->length; + dev->net->stats.tx_packets++; + dev->net->stats.tx_bytes += entry->length; } else { - dev->stats.tx_errors++; + dev->net->stats.tx_errors++; switch (urb->status) { case -EPIPE: @@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) devdbg (dev, "drop, code %d", retval); drop: retval = NET_XMIT_SUCCESS; - dev->stats.tx_dropped++; + dev->net->stats.tx_dropped++; if (skb) dev_kfree_skb_any (skb); usb_free_urb (urb); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 87197dd9c78..1097c72e44d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -208,11 +208,14 @@ rx_drop: static struct net_device_stats *veth_get_stats(struct net_device *dev) { - struct veth_priv *priv = netdev_priv(dev); - struct net_device_stats *dev_stats = &dev->stats; - unsigned int cpu; + struct veth_priv *priv; + struct net_device_stats *dev_stats; + int cpu; struct veth_net_stats *stats; + priv = netdev_priv(dev); + dev_stats = &dev->stats; + dev_stats->rx_packets = 0; dev_stats->tx_packets = 0; dev_stats->rx_bytes = 0; @@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev) dev_stats->tx_dropped = 0; dev_stats->rx_dropped = 0; - if (priv->stats) - for_each_online_cpu(cpu) { - stats = per_cpu_ptr(priv->stats, cpu); + for_each_online_cpu(cpu) { + stats = per_cpu_ptr(priv->stats, cpu); - dev_stats->rx_packets += stats->rx_packets; - dev_stats->tx_packets += stats->tx_packets; - dev_stats->rx_bytes += stats->rx_bytes; - dev_stats->tx_bytes += stats->tx_bytes; - dev_stats->tx_dropped += stats->tx_dropped; - dev_stats->rx_dropped += stats->rx_dropped; - } + dev_stats->rx_packets += stats->rx_packets; + dev_stats->tx_packets += stats->tx_packets; + dev_stats->rx_bytes += stats->rx_bytes; + dev_stats->tx_bytes += stats->tx_bytes; + dev_stats->tx_dropped += stats->tx_dropped; + dev_stats->rx_dropped += stats->rx_dropped; + } return dev_stats; } @@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev) netif_carrier_off(dev); netif_carrier_off(priv->peer); - free_percpu(priv->stats); - priv->stats = NULL; return 0; } @@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev) return 0; } +static void veth_dev_free(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + free_percpu(priv->stats); + free_netdev(dev); +} + static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev) dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; - dev->destructor = free_netdev; + dev->destructor = veth_dev_free; } /* diff --git 
a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index d3489a3c4c0..934f7671650 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -621,6 +621,7 @@ static const struct net_device_ops rhine_netdev_ops = { .ndo_start_xmit = rhine_start_tx, .ndo_get_stats = rhine_get_stats, .ndo_set_multicast_list = rhine_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_do_ioctl = netdev_ioctl, @@ -1217,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) struct rhine_private *rp = netdev_priv(dev); void __iomem *ioaddr = rp->base; unsigned entry; + unsigned long flags; /* Caution: the write order is important here, set the field with the "ownership" bits last. */ @@ -1260,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); /* lock eth irq */ - spin_lock_irq(&rp->lock); + spin_lock_irqsave(&rp->lock, flags); wmb(); rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); wmb(); @@ -1279,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; - spin_unlock_irq(&rp->lock); + spin_unlock_irqrestore(&rp->lock, flags); if (debug > 4) { printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3ba35956327..cee08a1e497 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1778,7 +1778,7 @@ static void velocity_error(struct velocity_info *vptr, int status) * mode */ if (vptr->rev_id < REV_ID_VT3216_A0) { - if (vptr->mii_status | VELOCITY_DUPLEX_FULL) + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); else BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2a6e81d5b57..bbedf03a212 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -70,6 +70,9 @@ struct virtnet_info struct sk_buff_head recv; struct sk_buff_head send; + /* Work struct for refilling if we run low on memory. */ + struct delayed_work refill; + /* Chain pages by the private ptr. */ struct page *pages; }; @@ -273,19 +276,22 @@ drop: dev_kfree_skb(skb); } -static void try_fill_recv_maxbufs(struct virtnet_info *vi) +static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) { struct sk_buff *skb; struct scatterlist sg[2+MAX_SKB_FRAGS]; int num, err, i; + bool oom = false; sg_init_table(sg, 2+MAX_SKB_FRAGS); for (;;) { struct virtio_net_hdr *hdr; skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); - if (unlikely(!skb)) + if (unlikely(!skb)) { + oom = true; break; + } skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, MAX_PACKET_LEN); @@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) if (vi->big_packets) { for (i = 0; i < MAX_SKB_FRAGS; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - f->page = get_a_page(vi, GFP_ATOMIC); + f->page = get_a_page(vi, gfp); if (!f->page) break; @@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) if (unlikely(vi->num > vi->max)) vi->max = vi->num; vi->rvq->vq_ops->kick(vi->rvq); + return !oom; } -static void try_fill_recv(struct virtnet_info *vi) +/* Returns false if we couldn't fill entirely (OOM). 
*/ +static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { struct sk_buff *skb; struct scatterlist sg[1]; int err; + bool oom = false; - if (!vi->mergeable_rx_bufs) { - try_fill_recv_maxbufs(vi); - return; - } + if (!vi->mergeable_rx_bufs) + return try_fill_recv_maxbufs(vi, gfp); for (;;) { skb_frag_t *f; skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); - if (unlikely(!skb)) + if (unlikely(!skb)) { + oom = true; break; + } skb_reserve(skb, NET_IP_ALIGN); f = &skb_shinfo(skb)->frags[0]; - f->page = get_a_page(vi, GFP_ATOMIC); + f->page = get_a_page(vi, gfp); if (!f->page) { + oom = true; kfree_skb(skb); break; } @@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi) if (unlikely(vi->num > vi->max)) vi->max = vi->num; vi->rvq->vq_ops->kick(vi->rvq); + return !oom; } static void skb_recv_done(struct virtqueue *rvq) @@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq) } } +static void refill_work(struct work_struct *work) +{ + struct virtnet_info *vi; + bool still_empty; + + vi = container_of(work, struct virtnet_info, refill.work); + napi_disable(&vi->napi); + try_fill_recv(vi, GFP_KERNEL); + still_empty = (vi->num == 0); + napi_enable(&vi->napi); + + /* In theory, this can happen: if we don't get any buffers in + * we will *never* try to fill again. */ + if (still_empty) + schedule_delayed_work(&vi->refill, HZ/2); +} + static int virtnet_poll(struct napi_struct *napi, int budget) { struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); @@ -400,10 +428,10 @@ again: received++; } - /* FIXME: If we oom and completely run out of inbufs, we need - * to start a timer trying to fill more. */ - if (vi->num < vi->max / 2) - try_fill_recv(vi); + if (vi->num < vi->max / 2) { + if (!try_fill_recv(vi, GFP_ATOMIC)) + schedule_delayed_work(&vi->refill, 0); + } /* Out of packets? */ if (received < budget) { @@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev) vi->vdev = vdev; vdev->priv = vi; vi->pages = NULL; + INIT_DELAYED_WORK(&vi->refill, refill_work); /* If they give us a callback when all buffers are done, we don't need * the timer. */ @@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev) } /* Last of all, set up some receive buffers. */ - try_fill_recv(vi); + try_fill_recv(vi, GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->num == 0) { @@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); + cancel_delayed_work_sync(&vi->refill); free_vqs: vdev->config->del_vqs(vdev); free: @@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev) BUG_ON(vi->num != 0); unregister_netdev(vi->dev); + cancel_delayed_work_sync(&vi->refill); vdev->config->del_vqs(vi->vdev); diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c index 223238de475..1ea1ef6c3b9 100644 --- a/drivers/net/wan/hd64570.c +++ b/drivers/net/wan/hd64570.c @@ -584,8 +584,9 @@ static void sca_dump_rings(struct net_device *dev) sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? 
"" : "in"); for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++) printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat))); + printk(KERN_CONT "\n"); - printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " + printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " "last=%u %sactive", sca_inw(get_dmac_tx(port) + CDAL, card), sca_inw(get_dmac_tx(port) + EDAL, card), diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 497b003d723..f099c34a3ae 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -529,8 +529,9 @@ static void sca_dump_rings(struct net_device *dev) sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in"); for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++) printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat))); + printk(KERN_CONT "\n"); - printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " + printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " "last=%u %sactive", sca_inl(get_dmac_tx(port) + CDAL, card), sca_inl(get_dmac_tx(port) + EDAL, card), diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 3fb9dbc88a1..d14e95a08d6 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -326,11 +326,9 @@ sbni_pci_probe( struct net_device *dev ) } if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs) - printk( KERN_WARNING " WARNING: The PCI BIOS assigned " - "this PCI card to IRQ %d, which is unlikely " - "to work!.\n" - KERN_WARNING " You should use the PCI BIOS " - "setup to assign a valid IRQ line.\n", + printk( KERN_WARNING + " WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n" + " You should use the PCI BIOS setup to assign a valid IRQ line.\n", pci_irq_line ); /* avoiding re-enable dual adapters */ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index c70604f0329..8ce5e4cee16 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev, readSsidRid(local, &SSID_rid); /* Check if we asked for `any' */ - if(dwrq->flags == 0) { + if (dwrq->flags == 0) { /* Just send an empty SSID list */ memset(&SSID_rid, 0, sizeof(SSID_rid)); } else { - int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; /* Check the size of the string */ - if(dwrq->length > IW_ESSID_MAX_SIZE) { + if (dwrq->length > IW_ESSID_MAX_SIZE) return -E2BIG ; - } + /* Check if index is valid */ - if((index < 0) || (index >= 4)) { + if (index >= ARRAY_SIZE(SSID_rid.ssids)) return -EINVAL; - } /* Set the SSID */ memset(SSID_rid.ssids[index].ssid, 0, @@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev, return -EINVAL; } clear_bit (FLAG_RADIO_OFF, &local->flags); - for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++) + for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) if (v == cap_rid.txPowerLevels[i]) { readConfigRid(local, 1); local->config.txPower = v; diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index d26e7b48531..eb0337c4954 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -1,5 +1,6 @@ config ATH_COMMON tristate "Atheros Wireless Cards" + depends on WLAN_80211 depends on ATH5K || ATH9K || AR9170_USB source "drivers/net/wireless/ath/ath5k/Kconfig" diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 9d38cf60a0d..88c3d857386 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ 
b/drivers/net/wireless/ath/ar9170/main.c @@ -1967,13 +1967,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, int ret; mutex_lock(&ar->mutex); - if ((param) && !(queue > __AR9170_NUM_TXQ)) { + if (queue < __AR9170_NUM_TXQ) { memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], param, sizeof(*param)); ret = ar9170_set_qos(ar); - } else + } else { ret = -EINVAL; + } mutex_unlock(&ar->mutex); return ret; diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 754b1f8d8da..007eb85fc67 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -598,11 +598,15 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru) err = request_firmware(&aru->init_values, "ar9170-1.fw", &aru->udev->dev); + if (err) { + dev_err(&aru->udev->dev, "file with init values not found.\n"); + return err; + } err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); if (err) { release_firmware(aru->init_values); - dev_err(&aru->udev->dev, "file with init values not found.\n"); + dev_err(&aru->udev->dev, "firmware file not found.\n"); return err; } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index ea045151f95..029c1bc7468 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2970,6 +2970,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (modparam_nohwcrypt) return -EOPNOTSUPP; + if (sc->opmode == NL80211_IFTYPE_AP) + return -EOPNOTSUPP; + switch (key->alg) { case ALG_WEP: case ALG_TKIP: diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 1aeafb511dd..aad259b4c19 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -478,6 +478,18 @@ void ath9k_ani_reset(struct ath_hw *ah) "Reset ANI state opmode %u\n", ah->opmode); ah->stats.ast_ani_reset++; + if (ah->opmode == NL80211_IFTYPE_AP) { + /* + * ath9k_hw_ani_control() will only process items set on + * ah->ani_function + */ + if (IS_CHAN_2GHZ(chan)) + ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | + ATH9K_ANI_FIRSTEP_LEVEL); + else + ah->ani_function = 0; + } + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a2fda702b62..ce0e86c36a8 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) integer = swab32(eep->modalHeader.antCtrlCommon); eep->modalHeader.antCtrlCommon = integer; - for (i = 0; i < AR5416_MAX_CHAINS; i++) { + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { integer = swab32(eep->modalHeader.antCtrlChain[i]); eep->modalHeader.antCtrlChain[i] = integer; } @@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, ctlMode, numCtlModes, isHt40CtlMode, (pCtlMode[ctlMode] & EXT_ADDITIVE)); - for (i = 0; (i < AR5416_NUM_CTLS) && + for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index b61a071788a..4ccf48e396d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -355,7 
+355,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } if (bf_next == NULL) { - INIT_LIST_HEAD(&bf_head); + /* + * Make sure the last desc is reclaimed if it + * not a holding desc. + */ + if (!bf_last->bf_stale) + list_move_tail(&bf->list, &bf_head); + else + INIT_LIST_HEAD(&bf_head); } else { ASSERT(!list_empty(bf_q)); list_move_tail(&bf->list, &bf_head); diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index eef370bd121..bf3d25ba7be 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -474,6 +474,21 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, return 0; } +/* + * Some users have reported their EEPROM programmed with + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. + */ +static void ath_regd_sanitize(struct ath_regulatory *reg) +{ + if (reg->current_rd != COUNTRY_ERD_FLAG) + return; + printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); + reg->current_rd = 0x64; +} + int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, @@ -486,6 +501,8 @@ ath_regd_init(struct ath_regulatory *reg, if (!reg) return -EINVAL; + ath_regd_sanitize(reg); + printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); if (!ath_regd_is_eeprom_valid(reg)) { diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index f580c2812d9..40448067e4c 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -648,6 +648,7 @@ struct b43_wl { u8 nr_devs; bool radiotap_enabled; + bool radio_enabled; /* The beacon we are currently using (AP or IBSS mode). * This beacon stuff is protected by the irq_lock. */ diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 6456afebdba..e71c8d9cd70 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3497,8 +3497,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed) if (phy->ops->set_rx_antenna) phy->ops->set_rx_antenna(dev, antenna); - if (!!conf->radio_enabled != phy->radio_on) { - if (conf->radio_enabled) { + if (wl->radio_enabled != phy->radio_on) { + if (wl->radio_enabled) { b43_software_rfkill(dev, false); b43info(dev->wl, "Radio turned on by software\n"); if (!dev->radio_hw_enable) { @@ -4339,6 +4339,7 @@ static int b43_op_start(struct ieee80211_hw *hw) wl->beacon0_uploaded = 0; wl->beacon1_uploaded = 0; wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; mutex_lock(&wl->mutex); @@ -4378,6 +4379,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) if (b43_status(dev) >= B43_STAT_STARTED) b43_wireless_core_stop(dev); b43_wireless_core_exit(dev); + wl->radio_enabled = 0; mutex_unlock(&wl->mutex); cancel_work_sync(&(wl->txpower_adjust_work)); @@ -4560,6 +4562,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) B43_WARN_ON(1); dev->phy.gmode = have_2ghz_phy; + dev->phy.radio_on = 1; tmp = dev->phy.gmode ? 
B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c index 3cfc30307a2..6c3a74964ab 100644 --- a/drivers/net/wireless/b43/pcmcia.c +++ b/drivers/net/wireless/b43/pcmcia.c @@ -35,6 +35,7 @@ static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), + PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476), PCMCIA_DEVICE_NULL, }; diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h index 77fda148ac4..038baa8869e 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -607,6 +607,7 @@ struct b43legacy_wl { u8 nr_devs; bool radiotap_enabled; + bool radio_enabled; /* The beacon we are currently using (AP or IBSS mode). * This beacon stuff is protected by the irq_lock. */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index e5136fb65dd..c4973c1942b 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -2689,8 +2689,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw, /* Antennas for RX and management frame TX. */ b43legacy_mgmtframe_txantenna(dev, antenna_tx); - if (!!conf->radio_enabled != phy->radio_on) { - if (conf->radio_enabled) { + if (wl->radio_enabled != phy->radio_on) { + if (wl->radio_enabled) { b43legacy_radio_turn_on(dev); b43legacyinfo(dev->wl, "Radio turned on by software\n"); if (!dev->radio_hw_enable) @@ -3441,6 +3441,7 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) wl->beacon0_uploaded = 0; wl->beacon1_uploaded = 0; wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; mutex_lock(&wl->mutex); @@ -3479,6 +3480,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw) if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) b43legacy_wireless_core_stop(dev); b43legacy_wireless_core_exit(dev); + wl->radio_enabled = 0; mutex_unlock(&wl->mutex); } @@ -3620,6 +3622,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) have_bphy = 1; dev->phy.gmode = (have_gphy || have_bphy); + dev->phy.radio_on = 1; tmp = dev->phy.gmode ? 
B43legacy_TMSLOW_GMODE : 0; b43legacy_wireless_core_reset(dev, tmp); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 44c29b3f672..f593fbbb4e5 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, return 0; } -static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, - u32 src_phys, u32 dest_address, u32 length) +static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, + int nr, u32 dest_address, u32 len) { - u32 bytes_left = length; - u32 src_offset = 0; - u32 dest_offset = 0; - int status = 0; + int ret, i; + u32 size; + IPW_DEBUG_FW(">> \n"); - IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", - src_phys, dest_address, length); - while (bytes_left > CB_MAX_LENGTH) { - status = ipw_fw_dma_add_command_block(priv, - src_phys + src_offset, - dest_address + - dest_offset, - CB_MAX_LENGTH, 0, 0); - if (status) { + IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", + nr, dest_address, len); + + for (i = 0; i < nr; i++) { + size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); + ret = ipw_fw_dma_add_command_block(priv, src_address[i], + dest_address + + i * CB_MAX_LENGTH, size, + 0, 0); + if (ret) { IPW_DEBUG_FW_INFO(": Failed\n"); return -1; } else IPW_DEBUG_FW_INFO(": Added new cb\n"); - - src_offset += CB_MAX_LENGTH; - dest_offset += CB_MAX_LENGTH; - bytes_left -= CB_MAX_LENGTH; - } - - /* add the buffer tail */ - if (bytes_left > 0) { - status = - ipw_fw_dma_add_command_block(priv, src_phys + src_offset, - dest_address + dest_offset, - bytes_left, 0, 0); - if (status) { - IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n"); - return -1; - } else - IPW_DEBUG_FW_INFO - (": Adding new cb - the buffer tail\n"); } IPW_DEBUG_FW("<< \n"); @@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) { - int rc = -1; + int ret = -1; int offset = 0; struct fw_chunk *chunk; - dma_addr_t shared_phys; - u8 *shared_virt; + int total_nr = 0; + int i; + struct pci_pool *pool; + u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL]; + dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL]; IPW_DEBUG_TRACE("<< : \n"); - shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys); - if (!shared_virt) + pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); + if (!pool) { + IPW_ERROR("pci_pool_create failed\n"); return -ENOMEM; - - memmove(shared_virt, data, len); + } /* Start the Dma */ - rc = ipw_fw_dma_enable(priv); + ret = ipw_fw_dma_enable(priv); /* the DMA is already ready this would be a bug. 
*/ BUG_ON(priv->sram_desc.last_cb_index > 0); do { + u32 chunk_len; + u8 *start; + int size; + int nr = 0; + chunk = (struct fw_chunk *)(data + offset); offset += sizeof(struct fw_chunk); + chunk_len = le32_to_cpu(chunk->length); + start = data + offset; + + nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; + for (i = 0; i < nr; i++) { + virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, + &phys[total_nr]); + if (!virts[total_nr]) { + ret = -ENOMEM; + goto out; + } + size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, + CB_MAX_LENGTH); + memcpy(virts[total_nr], start, size); + start += size; + total_nr++; + /* We don't support fw chunk larger than 64*8K */ + BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); + } + /* build DMA packet and queue up for sending */ /* dma to chunk->address, the chunk->length bytes from data + * offeset*/ /* Dma loading */ - rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset, - le32_to_cpu(chunk->address), - le32_to_cpu(chunk->length)); - if (rc) { + ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], + nr, le32_to_cpu(chunk->address), + chunk_len); + if (ret) { IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); goto out; } - offset += le32_to_cpu(chunk->length); + offset += chunk_len; } while (offset < len); /* Run the DMA and wait for the answer */ - rc = ipw_fw_dma_kick(priv); - if (rc) { + ret = ipw_fw_dma_kick(priv); + if (ret) { IPW_ERROR("dmaKick Failed\n"); goto out; } - rc = ipw_fw_dma_wait(priv); - if (rc) { + ret = ipw_fw_dma_wait(priv); + if (ret) { IPW_ERROR("dmaWaitSync Failed\n"); goto out; } - out: - pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys); - return rc; + out: + for (i = 0; i < total_nr; i++) + pci_pool_free(pool, virts[i], phys[i]); + + pci_pool_destroy(pool); + + return ret; } /* stop nic */ @@ -6226,7 +6240,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, }; u8 channel; - while (channel_index < IPW_SCAN_CHANNELS) { + while (channel_index < IPW_SCAN_CHANNELS - 1) { channel = priv->speed_scan[priv->speed_scan_pos]; if (channel == 0) { diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index fbb3a573463..2de6471d4be 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -112,7 +112,7 @@ enum iwl3945_antenna { #define IWL_TX_FIFO_NONE 7 /* Minimum number of queues. 
MAX_NUM is defined in hw specific files */ -#define IWL_MIN_NUM_QUEUES 4 +#define IWL39_MIN_NUM_QUEUES 4 #define IEEE80211_DATA_LEN 2304 #define IEEE80211_4ADDR_LEN 30 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 6d1519e1f01..355f50ea7fe 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2675,12 +2675,10 @@ static ssize_t show_power_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - int mode = priv->power_data.user_power_setting; int level = priv->power_data.power_mode; char *p = buf; - p += sprintf(p, "INDEX:%d\t", level); - p += sprintf(p, "USER:%d\n", mode); + p += sprintf(p, "%d\n", level); return p - buf + 1; } diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 6ab07165ea2..18b135f510e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv) hw->wiphy->custom_regulatory = true; + /* Firmware does not support this */ + hw->wiphy->disable_beacon_hints = true; + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 11e08c06891..ca00cc8ad4c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, return -ENODATA; } + ptr = priv->eeprom; + if (!ptr) { + IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); + return -ENOMEM; + } + /* 4 characters for byte 0xYY */ buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { IWL_ERR(priv, "Can not allocate Buffer\n"); return -ENOMEM; } - - ptr = priv->eeprom; - if (!ptr) { - IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); - return -ENOMEM; - } pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? "OTP" : "EEPROM"); diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index e2d620f0b6e..650e20af20f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -258,8 +258,10 @@ struct iwl_channel_info { #define IWL_TX_FIFO_HCCA_2 6 #define IWL_TX_FIFO_NONE 7 -/* Minimum number of queues. MAX_NUM is defined in hw specific files */ -#define IWL_MIN_NUM_QUEUES 4 +/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IWL_MIN_NUM_QUEUES 10 /* Power management (not Tx power) structures */ diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 2addf735b19..ffd5c61a755 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, unsigned long flags; spin_lock_irqsave(&priv->sta_lock, flags); + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) IWL_ERR(priv, "index %d not used in uCode key table.\n", @@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, priv->default_wep_key--; memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } ret = iwl_send_static_wepkey_cmd(priv, 1); IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret); @@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags); return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 85ae7a62109..2e89040e63b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) goto drop_unlock; } - spin_unlock_irqrestore(&priv->lock, flags); - hdr_len = ieee80211_hdrlen(fc); /* Find (or create) index into station table for destination station */ @@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) if (sta_id == IWL_INVALID_STATION) { IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", hdr->addr1); - goto drop; + goto drop_unlock; } IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); @@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); } - priv->stations[sta_id].tid[tid].tfds_in_queue++; } txq = &priv->txq[txq_id]; q = &txq->q; txq->swq_id = swq_id; - spin_lock_irqsave(&priv->lock, flags); + if (unlikely(iwl_queue_space(q) < q->high_mark)) + goto drop_unlock; + + if (ieee80211_is_data_qos(fc)) + priv->stations[sta_id].tid[tid].tfds_in_queue++; /* Set up driver data for this TFD */ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); @@ -872,7 +873,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); /* Set up entry for this TFD in Tx byte-count array */ - priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len)); pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, @@ 
-901,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) drop_unlock: spin_unlock_irqrestore(&priv->lock, flags); -drop: return -1; } EXPORT_SYMBOL(iwl_tx_skb); @@ -1170,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) IWL_ERR(priv, "Start AGG on invalid station\n"); return -ENXIO; } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index cb9bd4c8f25..523843369ca 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3643,12 +3643,10 @@ static ssize_t show_power_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - int mode = priv->power_data.user_power_setting; int level = priv->power_data.power_mode; char *p = buf; - p += sprintf(p, "INDEX:%d\t", level); - p += sprintf(p, "USER:%d\n", mode); + p += sprintf(p, "%d\n", level); return p - buf + 1; } @@ -3970,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) hw->wiphy->custom_regulatory = true; + /* Firmware does not support this */ + hw->wiphy->disable_beacon_hints = true; + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; @@ -4020,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e SET_IEEE80211_DEV(hw, &pdev->dev); if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || - (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { + (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) { IWL_ERR(priv, "invalid queues_num, should be between %d and %d\n", - IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); + IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); err = -EINVAL; goto out_ieee80211_free_hw; } diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig index 1eccb6df46d..030401d367d 100644 --- a/drivers/net/wireless/iwmc3200wifi/Kconfig +++ b/drivers/net/wireless/iwmc3200wifi/Kconfig @@ -4,6 +4,15 @@ config IWM depends on CFG80211 select WIRELESS_EXT select FW_LOADER + help + The Intel Wireless Multicomm 3200 hardware is a combo + card with GPS, Bluetooth, WiMax and 802.11 radios. It + runs over SDIO and is typically found on Moorestown + based platform. This driver takes care of the 802.11 + part, which is a fullmac one. + + If you choose to build it as a module, it'll be called + iwmc3200wifi.ko. 
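The iwl-sta.c hunks above both add the same guard: when rfkill is asserted the driver still updates its local key/station bookkeeping under the sta_lock, but skips the firmware command and returns success, taking care to drop the spinlock on that early path. A minimal sketch of the pattern, using hypothetical names (struct my_priv, my_send_key_cmd) rather than the real iwlwifi code:

        #include <linux/spinlock.h>
        #include <linux/bitops.h>

        struct my_priv {
                spinlock_t sta_lock;
                unsigned long key_table;
                bool rfkill_asserted;           /* stand-in for iwl_is_rfkill() */
        };

        /* hypothetical firmware call, stubbed so the sketch is self-contained */
        static int my_send_key_cmd(struct my_priv *priv, int idx) { return 0; }

        static int my_remove_key(struct my_priv *priv, int idx)
        {
                unsigned long flags;
                int ret;

                spin_lock_irqsave(&priv->sta_lock, flags);
                clear_bit(idx, &priv->key_table);       /* local state is always updated */
                if (priv->rfkill_asserted) {
                        /* Radio is off: the firmware cannot take commands,
                         * so skip the command but still report success. */
                        spin_unlock_irqrestore(&priv->sta_lock, flags);
                        return 0;
                }
                ret = my_send_key_cmd(priv, idx);
                spin_unlock_irqrestore(&priv->sta_lock, flags);
                return ret;
        }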
config IWM_DEBUG bool "Enable full debugging output in iwmc3200wifi" diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index 834a7f544e5..e2334d12359 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c @@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm) eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); if (IS_ERR(eeprom_rxiq)) { IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); + kfree(rxiq); return PTR_ERR(eeprom_rxiq); } diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index aaa20c6885c..bf294e41753 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c @@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, int ret = 0; wdev = iwm_wdev_alloc(sizeof_bus, dev); - if (!wdev) { - dev_err(dev, "no memory for wireless device instance\n"); - return ERR_PTR(-ENOMEM); - } + if (IS_ERR(wdev)) + return wdev; iwm = wdev_to_iwm(wdev); iwm->bus_ops = if_ops; @@ -151,8 +149,8 @@ void iwm_if_free(struct iwm_priv *iwm) return; free_netdev(iwm_to_ndev(iwm)); - iwm_wdev_free(iwm); iwm_priv_deinit(iwm); + iwm_wdev_free(iwm); } int iwm_if_add(struct iwm_priv *iwm) diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c index 9a5408e7d94..5c6968101f0 100644 --- a/drivers/net/wireless/libertas/11d.c +++ b/drivers/net/wireless/libertas/11d.c @@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region) { u8 i; - for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) + for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++) region[i] = toupper(region[i]); for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 01db705a38e..685098148e1 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -135,8 +135,14 @@ int lbs_update_hw_spec(struct lbs_private *priv) /* Clamp region code to 8-bit since FW spec indicates that it should * only ever be 8-bit, even though the field size is 16-bit. Some firmware * returns non-zero high 8 bits here. + * + * Firmware version 4.0.102 used in CF8381 has region code shifted. We + * need to check for this problem and handle it properly. 
*/ - priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; + if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V4) + priv->regioncode = (le16_to_cpu(cmd.regioncode) >> 8) & 0xFF; + else + priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { /* use the region code to search for the index */ diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index 48da157d6cd..72f3479a4d7 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h @@ -234,6 +234,8 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in /** Mesh enable bit in FW capability */ #define MESH_CAPINFO_ENABLE_MASK (1<<16) +/** FW definition from Marvell v4 */ +#define MRVL_FW_V4 (0x04) /** FW definition from Marvell v5 */ #define MRVL_FW_V5 (0x05) /** FW definition from Marvell v10 */ diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h index 0a2e29140ad..c8a1998d474 100644 --- a/drivers/net/wireless/libertas/hostcmd.h +++ b/drivers/net/wireless/libertas/hostcmd.h @@ -56,8 +56,8 @@ struct rxpd { u8 bss_type; /* BSS number */ u8 bss_num; - } bss; - } u; + } __attribute__ ((packed)) bss; + } __attribute__ ((packed)) u; /* SNR */ u8 snr; diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index 601b5424967..6c95af3023c 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c @@ -5,6 +5,7 @@ * for sending scan commands to the firmware. */ #include <linux/types.h> +#include <linux/kernel.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <asm/unaligned.h> @@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv, iwe.u.bitrate.disabled = 0; iwe.u.bitrate.value = 0; - for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { + for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { /* Bit rate given in 500 kb/s units */ iwe.u.bitrate.value = bss->rates[j] * 500000; current_val = iwe_stream_add_value(info, start, current_val, diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index e789c6e9938..7916ca3f84c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -418,6 +418,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, continue; if (!data2->started || !hwsim_ps_rx_ok(data2, skb) || + !data->channel || !data2->channel || data->channel->center_freq != data2->channel->center_freq || !(data->group & data2->group)) continue; @@ -708,7 +709,7 @@ static const struct ieee80211_ops mac80211_hwsim_ops = static void mac80211_hwsim_free(void) { struct list_head tmplist, *i, *tmp; - struct mac80211_hwsim_data *data; + struct mac80211_hwsim_data *data, *tmpdata; INIT_LIST_HEAD(&tmplist); @@ -717,7 +718,7 @@ static void mac80211_hwsim_free(void) list_move(i, &tmplist); spin_unlock_bh(&hwsim_radio_lock); - list_for_each_entry(data, &tmplist, list) { + list_for_each_entry_safe(data, tmpdata, &tmplist, list) { debugfs_remove(data->debugfs_group); debugfs_remove(data->debugfs_ps); debugfs_remove(data->debugfs); @@ -1166,8 +1167,8 @@ static void __exit exit_mac80211_hwsim(void) { printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); - unregister_netdev(hwsim_mon); mac80211_hwsim_free(); + unregister_netdev(hwsim_mon); } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index a263d5c84c0..83967afe082 100644 --- 
a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -261,7 +261,7 @@ struct mwl8k_vif { */ }; -#define MWL8K_VIF(_vif) (struct mwl8k_vif *)(&((_vif)->drv_priv)) +#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) static const struct ieee80211_channel mwl8k_channels[] = { { .center_freq = 2412, .hw_value = 1, }, @@ -1012,6 +1012,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) rmb(); skb = rxq->rx_skb[rxq->rx_head]; + if (skb == NULL) + break; rxq->rx_skb[rxq->rx_head] = NULL; rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; @@ -1591,6 +1593,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) timeout = wait_for_completion_timeout(&cmd_wait, msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); + pci_unmap_single(priv->pdev, dma_addr, dma_size, + PCI_DMA_BIDIRECTIONAL); + result = &cmd->result; if (!timeout) { spin_lock_irq(&priv->fw_lock); @@ -1610,8 +1615,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) *result); } - pci_unmap_single(priv->pdev, dma_addr, dma_size, - PCI_DMA_BIDIRECTIONAL); return rc; } @@ -1654,18 +1657,18 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw) memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); - cmd->num_tx_queues = MWL8K_TX_QUEUES; + cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); for (i = 0; i < MWL8K_TX_QUEUES; i++) cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); - cmd->num_tx_desc_per_queue = MWL8K_TX_DESCS; - cmd->total_rx_desc = MWL8K_RX_DESCS; + cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); + cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS); rc = mwl8k_post_cmd(hw, &cmd->header); if (!rc) { SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); - priv->fw_rev = cmd->fw_rev; + priv->fw_rev = le32_to_cpu(cmd->fw_rev); priv->hw_rev = cmd->hw_rev; priv->region_code = le16_to_cpu(cmd->region_code); } @@ -3216,15 +3219,19 @@ static int mwl8k_configure_filter_wt(struct work_struct *wt) struct dev_addr_list *mclist = worker->mclist; struct mwl8k_priv *priv = hw->priv; - struct mwl8k_vif *mv_vif; int rc = 0; if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { if (*total_flags & FIF_BCN_PRBRESP_PROMISC) rc = mwl8k_cmd_set_pre_scan(hw); else { - mv_vif = MWL8K_VIF(priv->vif); - rc = mwl8k_cmd_set_post_scan(hw, mv_vif->bssid); + u8 *bssid; + + bssid = "\x00\x00\x00\x00\x00\x00"; + if (priv->vif != NULL) + bssid = MWL8K_VIF(priv->vif)->bssid; + + rc = mwl8k_cmd_set_post_scan(hw, bssid); } } @@ -3726,6 +3733,8 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev) ieee80211_stop_queues(hw); + ieee80211_unregister_hw(hw); + /* Remove tx reclaim tasklet */ tasklet_kill(&priv->tx_reclaim_task); @@ -3739,8 +3748,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev) for (i = 0; i < MWL8K_TX_QUEUES; i++) mwl8k_txq_reclaim(hw, i, 1); - ieee80211_unregister_hw(hw); - for (i = 0; i < MWL8K_TX_QUEUES; i++) mwl8k_txq_deinit(hw, i); diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 632fac86a30..b3946272c72 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c @@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc) int err = 0; u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE]; - if ((key < 0) || (key > 4)) + if ((key < 0) || (key >= 4)) return -EINVAL; err = 
hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index 345593c4acc..a370e510f19 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -2521,6 +2521,8 @@ static const struct net_device_ops orinoco_netdev_ops = { .ndo_start_xmit = orinoco_xmit, .ndo_set_multicast_list = orinoco_set_multicast_list, .ndo_change_mtu = orinoco_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, .ndo_get_stats = orinoco_get_stats, }; @@ -2555,7 +2557,6 @@ struct net_device priv->wireless_data.spy_data = &priv->spy_data; dev->wireless_data = &priv->wireless_data; #endif - /* we use the default eth_mac_addr for setting the MAC addr */ /* Reserve space in skb for the SNAP header */ dev->hard_header_len += ENCAPS_OVERHEAD; diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index b618bd14583..22ca122bd79 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c @@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) struct p54_tx_info *range; unsigned long flags; - if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue))) + if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue))) return; - /* There used to be a check here to see if the SKB was on the - * TX queue or not. This can never happen because all SKBs we - * see here successfully went through p54_assign_address() - * which means the SKB is on the ->tx_queue. + /* + * don't try to free an already unlinked skb */ + if (unlikely((!skb->next) || (!skb->prev))) + return; spin_lock_irqsave(&priv->tx_queue.lock, flags); info = IEEE80211_SKB_CB(skb); range = (void *)info->rate_driver_data; - if (!skb_queue_is_first(&priv->tx_queue, skb)) { + if (skb->prev != (struct sk_buff *)&priv->tx_queue) { struct ieee80211_tx_info *ni; struct p54_tx_info *mr; - ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb)); + ni = IEEE80211_SKB_CB(skb->prev); mr = (struct p54_tx_info *)ni->rate_driver_data; } - if (!skb_queue_is_last(&priv->tx_queue, skb)) { + if (skb->next != (struct sk_buff *)&priv->tx_queue) { struct ieee80211_tx_info *ni; struct p54_tx_info *mr; - ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb)); + ni = IEEE80211_SKB_CB(skb->next); mr = (struct p54_tx_info *)ni->rate_driver_data; } __skb_unlink(skb, &priv->tx_queue); @@ -864,13 +864,15 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev, unsigned long flags; spin_lock_irqsave(&priv->tx_queue.lock, flags); - skb_queue_walk(&priv->tx_queue, entry) { + entry = priv->tx_queue.next; + while (entry != (struct sk_buff *)&priv->tx_queue) { struct p54_hdr *hdr = (struct p54_hdr *) entry->data; if (hdr->req_id == req_id) { spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return entry; } + entry = entry->next; } spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return NULL; @@ -888,33 +890,36 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) int count, idx; spin_lock_irqsave(&priv->tx_queue.lock, flags); - skb_queue_walk(&priv->tx_queue, entry) { + entry = (struct sk_buff *) priv->tx_queue.next; + while (entry != (struct sk_buff *)&priv->tx_queue) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); struct p54_hdr *entry_hdr; struct p54_tx_data *entry_data; unsigned int pad = 0, frame_len; range = (void 
*)info->rate_driver_data; - if (range->start_addr != addr) + if (range->start_addr != addr) { + entry = entry->next; continue; + } - if (!skb_queue_is_last(&priv->tx_queue, entry)) { + if (entry->next != (struct sk_buff *)&priv->tx_queue) { struct ieee80211_tx_info *ni; struct p54_tx_info *mr; - ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, - entry)); + ni = IEEE80211_SKB_CB(entry->next); mr = (struct p54_tx_info *)ni->rate_driver_data; } __skb_unlink(entry, &priv->tx_queue); - spin_unlock_irqrestore(&priv->tx_queue.lock, flags); frame_len = entry->len; entry_hdr = (struct p54_hdr *) entry->data; entry_data = (struct p54_tx_data *) entry_hdr->data; - priv->tx_stats[entry_data->hw_queue].len--; + if (priv->tx_stats[entry_data->hw_queue].len) + priv->tx_stats[entry_data->hw_queue].len--; priv->stats.dot11ACKFailureCount += payload->tries - 1; + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); /* * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are @@ -1164,21 +1169,23 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, } } - skb_queue_walk(&priv->tx_queue, entry) { + entry = priv->tx_queue.next; + while (left--) { u32 hole_size; info = IEEE80211_SKB_CB(entry); range = (void *)info->rate_driver_data; hole_size = range->start_addr - last_addr; if (!target_skb && hole_size >= len) { - target_skb = skb_queue_prev(&priv->tx_queue, entry); + target_skb = entry->prev; hole_size -= len; target_addr = last_addr; } largest_hole = max(largest_hole, hole_size); last_addr = range->end_addr; + entry = entry->next; } if (!target_skb && priv->rx_end - last_addr >= len) { - target_skb = skb_peek_tail(&priv->tx_queue); + target_skb = priv->tx_queue.prev; largest_hole = max(largest_hole, priv->rx_end - last_addr - len); if (!skb_queue_empty(&priv->tx_queue)) { info = IEEE80211_SKB_CB(target_skb); @@ -2084,6 +2091,7 @@ out: static void p54_stop(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; + struct sk_buff *skb; mutex_lock(&priv->conf_mutex); priv->mode = NL80211_IFTYPE_UNSPECIFIED; @@ -2098,7 +2106,8 @@ static void p54_stop(struct ieee80211_hw *dev) p54_tx_cancel(dev, priv->cached_beacon); priv->stop(dev); - skb_queue_purge(&priv->tx_queue); + while ((skb = skb_dequeue(&priv->tx_queue))) + kfree_skb(skb); priv->cached_beacon = NULL; priv->tsf_high32 = priv->tsf_low32 = 0; mutex_unlock(&priv->conf_mutex); diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 83116baeb11..72c7dbd39d0 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -635,7 +635,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) hw = p54_init_common(sizeof(*priv)); if (!hw) { - dev_err(&priv->spi->dev, "could not alloc ieee80211_hw"); + dev_err(&spi->dev, "could not alloc ieee80211_hw"); return -ENOMEM; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index b10b0383dfa..698b11b1cad 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -2427,11 +2427,10 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) #ifdef PCMCIA_DEBUG if (pc_debug > 3) { - int i; - printk(KERN_DEBUG "skb->data before untranslate"); - for (i = 0; i < 64; i++) - printk("%02x ", skb->data[i]); - printk("\n" KERN_DEBUG + print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ", + DUMP_PREFIX_NONE, 16, 1, + skb->data, 64, true); + printk(KERN_DEBUG "type = %08x, xsap = %02x%02x%02x, org = %02x02x02x\n", ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl, 
psnap->org[0], psnap->org[1], psnap->org[2]); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 66daf68ff0e..ce75426764a 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1550,7 +1550,9 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg); rt2x00_set_chip(rt2x00dev, RT2570, value, reg); - if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0)) { + if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || + rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); return -ENODEV; } diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index a498dde024e..49c9e2c1433 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -849,13 +849,15 @@ struct rt2x00_dev { static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, const unsigned int word, u32 *data) { - *data = rt2x00dev->rf[word]; + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); + *data = rt2x00dev->rf[word - 1]; } static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, u32 data) { - rt2x00dev->rf[word] = data; + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); + rt2x00dev->rf[word - 1] = data; } /* diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 294250e294d..87a95588a8e 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev) priv->aifsn[3] = 3; /* AIFSN[AC_BE] */ rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); + /* ENEDCA flag must always be set, transmit issues? 
*/ + rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA); + return 0; } @@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev, rtl818x_iowrite8(priv, &priv->map->BSSID[i], info->bssid[i]); + if (priv->is_rtl8187b) + reg = RTL818X_MSR_ENEDCA; + else + reg = 0; + if (is_valid_ether_addr(info->bssid)) { - reg = RTL818X_MSR_INFRA; - if (priv->is_rtl8187b) - reg |= RTL818X_MSR_ENEDCA; + reg |= RTL818X_MSR_INFRA; rtl818x_iowrite8(priv, &priv->map->MSR, reg); } else { - reg = RTL818X_MSR_NO_LINK; + reg |= RTL818X_MSR_NO_LINK; rtl818x_iowrite8(priv, &priv->map->MSR, reg); } diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c index b4425359224..cf9f899fe0e 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c @@ -208,11 +208,12 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; - rtl8187_unregister_led(&priv->led_tx); /* turn the LED off before exiting */ queue_delayed_work(dev->workqueue, &priv->led_off, 0); cancel_delayed_work_sync(&priv->led_off); + cancel_delayed_work_sync(&priv->led_on); rtl8187_unregister_led(&priv->led_rx); + rtl8187_unregister_led(&priv->led_tx); } #endif /* def CONFIG_RTL8187_LED */ diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 6af706408ac..c6d300666ad 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -3556,17 +3556,8 @@ wv_82593_config(struct net_device * dev) cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */ #ifdef DEBUG_I82593_SHOW - { - u_char *c = (u_char *) &cfblk; - int i; - printk(KERN_DEBUG "wavelan_cs: config block:"); - for(i = 0; i < sizeof(struct i82593_conf_block); i++,c++) - { - if((i % 16) == 0) printk("\n" KERN_DEBUG); - printk("%02x ", *c); - } - printk("\n"); - } + print_hex_dump(KERN_DEBUG, "wavelan_cs: config block: ", DUMP_PREFIX_NONE, + 16, 1, &cfblk, sizeof(struct i82593_conf_block), false); #endif /* Copy the config block to the i82593 */ diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 40b07b98822..3bd3c779fff 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) && !mac->pass_ctrl) return 0; - fc = *(__le16 *)buffer; + fc = get_unaligned((__le16*)buffer); need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); skb = dev_alloc_skb(length + (need_padding ? 
2 : 0)); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 14a19baff21..0e6e44689cc 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -38,7 +38,6 @@ static struct usb_device_id usb_ids[] = { /* ZD1211 */ { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 }, - { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, @@ -61,6 +60,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ + { USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, @@ -87,6 +87,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B }, /* "Driverless" devices that need ejecting */ { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 3c7a5053f1d..c2fd6187773 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c @@ -109,7 +109,7 @@ static int gx_fix; /* These identify the driver base version and may not be removed. */ static const char version[] __devinitconst = KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" - KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; + " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver"); @@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int yellowfin_open(struct net_device *dev); static void yellowfin_timer(unsigned long data); static void yellowfin_tx_timeout(struct net_device *dev); -static void yellowfin_init_ring(struct net_device *dev); +static int yellowfin_init_ring(struct net_device *dev); static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); static int yellowfin_rx(struct net_device *dev); @@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; - int i; + int i, ret; /* Reset the chip. 
*/ iowrite32(0x80000000, ioaddr + DMACtrl); - i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); - if (i) return i; + ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; if (yellowfin_debug > 1) printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n", dev->name, dev->irq); - yellowfin_init_ring(dev); + ret = yellowfin_init_ring(dev); + if (ret) { + free_irq(dev->irq, dev); + return ret; + } iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); @@ -700,12 +705,15 @@ static void yellowfin_tx_timeout(struct net_device *dev) int i; printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) - printk(" %8.8x", yp->rx_ring[i].result_status); - printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring); + printk(KERN_CONT " %8.8x", + yp->rx_ring[i].result_status); + printk(KERN_CONT "\n"); + printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) - printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs, - yp->tx_ring[i].result_status); - printk("\n"); + printk(KERN_CONT " %4.4x /%8.8x", + yp->tx_status[i].tx_errs, + yp->tx_ring[i].result_status); + printk(KERN_CONT "\n"); } /* If the hardware is found to hang regularly, we will update the code @@ -722,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev) } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ -static void yellowfin_init_ring(struct net_device *dev) +static int yellowfin_init_ring(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); - int i; + int i, j; yp->tx_full = 0; yp->cur_rx = yp->cur_tx = 0; @@ -750,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev) yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); } + if (i != RX_RING_SIZE) { + for (j = 0; j < i; j++) + dev_kfree_skb(yp->rx_skbuff[j]); + return -ENOMEM; + } yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); @@ -766,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev) yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); #else { - int j; - /* Tx ring needs a pair of descriptors, the second for the status. */ for (i = 0; i < TX_RING_SIZE; i++) { j = 2*i; @@ -802,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev) } #endif yp->tx_tail_desc = &yp->tx_status[0]; - return; + return 0; } static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -1216,20 +1227,20 @@ static int yellowfin_close(struct net_device *dev) #if defined(__i386__) if (yellowfin_debug > 2) { - printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n", + printk(KERN_DEBUG" Tx ring at %8.8llx:\n", (unsigned long long)yp->tx_ring_dma); for (i = 0; i < TX_RING_SIZE*2; i++) - printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n", + printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n", ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? 
'>' : ' ', i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); for (i = 0; i < TX_RING_SIZE; i++) - printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n", + printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n", i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); - printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n", + printk(KERN_DEBUG " Rx ring %8.8llx:\n", (unsigned long long)yp->rx_ring_dma); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n", diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 37c84e3b8be..81c753a617a 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -120,6 +120,9 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, for (i = ARRAY_SIZE(cards)-1; i >= 0; i--) if (z->id == cards[i].id) break; + if (i < 0) + return -ENODEV; + board = z->resource.start; ioaddr = board+cards[i].offset; dev = alloc_ei_netdev(); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index aee967d7f76..bacaa536fd5 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -9,6 +9,10 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/err.h> #include <linux/phy.h> #include <linux/of.h> #include <linux/of_mdio.h> @@ -137,3 +141,41 @@ struct phy_device *of_phy_connect(struct net_device *dev, return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); + +/** + * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy + * @dev: pointer to net_device claiming the phy + * @hndlr: Link state callback for the network device + * @iface: PHY data interface type + * + * This function is a temporary stop-gap and will be removed soon. It is + * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do + * not call this function from new drivers. + */ +struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, + void (*hndlr)(struct net_device *), + phy_interface_t iface) +{ + struct device_node *net_np; + char bus_id[MII_BUS_ID_SIZE + 3]; + struct phy_device *phy; + const u32 *phy_id; + int sz; + + if (!dev->dev.parent) + return NULL; + + net_np = dev_archdata_get_node(&dev->dev.parent->archdata); + if (!net_np) + return NULL; + + phy_id = of_get_property(net_np, "fixed-link", &sz); + if (!phy_id || sz < sizeof(*phy_id)) + return NULL; + + sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); + + phy = phy_connect(dev, bus_id, hndlr, 0, iface); + return IS_ERR(phy) ? 
NULL : phy; +} +EXPORT_SYMBOL(of_phy_connect_fixed_link); diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index e1f6ce03705..3c2270a8300 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c @@ -33,6 +33,7 @@ void oprofile_reset_stats(void) atomic_set(&oprofile_stats.sample_lost_no_mm, 0); atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); atomic_set(&oprofile_stats.event_lost_overflow, 0); + atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); } diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 5d610cbcfe8..a45b0c0d574 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -70,7 +70,6 @@ #undef CCIO_COLLECT_STATS #endif -#include <linux/proc_fs.h> #include <asm/runway.h> /* for proc_runway_root */ #ifdef DEBUG_CCIO_INIT @@ -1134,7 +1133,7 @@ static const struct file_operations ccio_proc_bitmap_fops = { .llseek = seq_lseek, .release = single_release, }; -#endif +#endif /* CONFIG_PROC_FS */ /** * ccio_find_ioc - Find the ioc in the ioc_list @@ -1568,14 +1567,15 @@ static int __init ccio_probe(struct parisc_device *dev) /* if this fails, no I/O cards will work, so may as well bug */ BUG_ON(dev->dev.platform_data == NULL); HBA_DATA(dev->dev.platform_data)->iommu = ioc; - + +#ifdef CONFIG_PROC_FS if (ioc_count == 0) { proc_create(MODULE_NAME, 0, proc_runway_root, &ccio_proc_info_fops); proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root, &ccio_proc_bitmap_fops); } - +#endif ioc_count++; parisc_has_iommu(); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 52ae0b1d470..d69bde6a234 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -353,7 +353,7 @@ static unsigned int dino_startup_irq(unsigned int irq) return 0; } -static struct hw_interrupt_type dino_interrupt_type = { +static struct irq_chip dino_interrupt_type = { .typename = "GSC-PCI", .startup = dino_startup_irq, .shutdown = dino_disable_irq, @@ -614,7 +614,7 @@ dino_fixup_bus(struct pci_bus *bus) dev_name(&bus->self->dev), i, bus->self->resource[i].start, bus->self->resource[i].end); - pci_assign_resource(bus->self, i); + WARN_ON(pci_assign_resource(bus->self, i)); DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", dev_name(&bus->self->dev), i, bus->self->resource[i].start, @@ -1019,22 +1019,22 @@ static int __init dino_probe(struct parisc_device *dev) ** It's not used to avoid chicken/egg problems ** with configuration accessor functions. 
*/ - bus = pci_scan_bus_parented(&dev->dev, dino_current_bus, - &dino_cfg_ops, NULL); + dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev, + dino_current_bus, &dino_cfg_ops, NULL); + if(bus) { - pci_bus_add_devices(bus); /* This code *depends* on scanning being single threaded * if it isn't, this global bus number count will fail */ dino_current_bus = bus->subordinate + 1; pci_bus_assign_resources(bus); + pci_bus_add_devices(bus); } else { - printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n", + printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n", dev_name(&dev->dev), dino_current_bus); /* increment the bus number in case of duplicates */ dino_current_bus++; } - dino_dev->hba.hba_bus = bus; return 0; } diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 5b89f404e66..51220749cb6 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -188,7 +188,7 @@ static unsigned int eisa_startup_irq(unsigned int irq) return 0; } -static struct hw_interrupt_type eisa_interrupt_type = { +static struct irq_chip eisa_interrupt_type = { .typename = "EISA", .startup = eisa_startup_irq, .shutdown = eisa_disable_irq, diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index 685d94e69d4..8c0b26e9b98 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c @@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file, ssize_t ret; int i; - if (*ppos >= HPEE_MAX_LENGTH) + if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH) return 0; count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c index c709ecc2b7f..0be1d50645a 100644 --- a/drivers/parisc/eisa_enumerator.c +++ b/drivers/parisc/eisa_enumerator.c @@ -101,7 +101,7 @@ static int configure_memory(const unsigned char *buf, printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); result = request_resource(mem_parent, res); if (result < 0) { - printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); + printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); return result; } } @@ -191,7 +191,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent, printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); result = request_resource(io_parent, res); if (result < 0) { - printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); + printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); return result; } } @@ -224,7 +224,7 @@ static int configure_port_init(const unsigned char *buf) case HPEE_PORT_INIT_WIDTH_BYTE: s=1; if (c & HPEE_PORT_INIT_MASK) { - printk("\n" KERN_WARNING "port_init: unverified mask attribute\n"); + printk(KERN_WARNING "port_init: unverified mask attribute\n"); outb((inb(get_16(buf+len+1) & get_8(buf+len+3)) | get_8(buf+len+4)), get_16(buf+len+1)); @@ -249,7 +249,7 @@ static int configure_port_init(const unsigned char *buf) case HPEE_PORT_INIT_WIDTH_DWORD: s=4; if (c & HPEE_PORT_INIT_MASK) { - printk("\n" KERN_WARNING "port_init: unverified mask attribute\n"); + printk(KERN_WARNING "port_init: unverified mask attribute\n"); outl((inl(get_16(buf+len+1) & get_32(buf+len+3)) | get_32(buf+len+7)), get_16(buf+len+1)); @@ -259,7 +259,7 @@ static int configure_port_init(const unsigned char *buf) break; default: - printk("\n" KERN_ERR "Invalid port init 
word %02x\n", c); + printk(KERN_ERR "Invalid port init word %02x\n", c); return 0; } @@ -297,7 +297,7 @@ static int configure_type_string(const unsigned char *buf) /* just skip past the type field */ len = get_8(buf); if (len > 80) { - printk("\n" KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len); + printk(KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len); } return 1+len; @@ -398,7 +398,7 @@ static int parse_slot_config(int slot, } if (p0 + function_len < pos) { - printk("\n" KERN_ERR "eisa_enumerator: function %d length mis-match " + printk(KERN_ERR "eisa_enumerator: function %d length mis-match " "got %d, expected %d\n", num_func, pos-p0, function_len); res=-1; diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index d3363291769..647adc9f85a 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -148,7 +148,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq) return 0; } -static struct hw_interrupt_type gsc_asic_interrupt_type = { +static struct irq_chip gsc_asic_interrupt_type = { .typename = "GSC-ASIC", .startup = gsc_asic_startup_irq, .shutdown = gsc_asic_disable_irq, @@ -158,7 +158,7 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = { .end = no_end_irq, }; -int gsc_assign_irq(struct hw_interrupt_type *type, void *data) +int gsc_assign_irq(struct irq_chip *type, void *data) { static int irq = GSC_IRQ_BASE; struct irq_desc *desc; diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h index 762a1babad6..b9d7bfb68e2 100644 --- a/drivers/parisc/gsc.h +++ b/drivers/parisc/gsc.h @@ -38,7 +38,7 @@ struct gsc_asic { int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic); int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */ int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */ -int gsc_assign_irq(struct hw_interrupt_type *type, void *data); +int gsc_assign_irq(struct irq_chip *type, void *data); int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit); void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl, void (*choose)(struct parisc_device *child, void *ctrl)); diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c index 13856415b43..815db175d42 100644 --- a/drivers/parisc/hppb.c +++ b/drivers/parisc/hppb.c @@ -62,7 +62,8 @@ static int hppb_probe(struct parisc_device *dev) } card = card->next; } - printk(KERN_INFO "Found GeckoBoa at 0x%x\n", dev->hpa.start); + printk(KERN_INFO "Found GeckoBoa at 0x%llx\n", + (unsigned long long) dev->hpa.start); card->hpa = dev->hpa.start; card->mmio_region.name = "HP-PB Bus"; @@ -73,8 +74,10 @@ static int hppb_probe(struct parisc_device *dev) status = ccio_request_resource(dev, &card->mmio_region); if(status < 0) { - printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08x, %08x)\n", - __FILE__, card->mmio_region.start, card->mmio_region.end); + printk(KERN_ERR "%s: failed to claim HP-PB " + "bus space (0x%08llx, 0x%08llx)\n", + __FILE__, (unsigned long long) card->mmio_region.start, + (unsigned long long) card->mmio_region.end); } return 0; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 4a9cc92d4d1..88e33355321 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -729,7 +729,7 @@ static int iosapic_set_affinity_irq(unsigned int irq, } #endif -static struct hw_interrupt_type iosapic_interrupt_type = { +static struct irq_chip iosapic_interrupt_type = { .typename = "IO-SAPIC-level", .startup = iosapic_startup_irq, .shutdown = iosapic_disable_irq, 
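The hppb.c hunk above changes the probe and error messages to print resource fields with %llx and an explicit unsigned long long cast, since resource_size_t can be 32 or 64 bits wide depending on the kernel configuration. A minimal user-space sketch of the same idiom follows; the uint64_t typedef is only a stand-in for the kernel type, not its real definition.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's resource_size_t, whose width is config-dependent. */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t hpa = 0xf8000000ULL;

	/* Cast to unsigned long long so the %llx format stays correct no
	 * matter how wide resource_size_t is on a given configuration. */
	printf("Found GeckoBoa at 0x%llx\n", (unsigned long long)hpa);
	return 0;
}

The same cast is applied to card->mmio_region.start and .end in the ccio_request_resource() failure message, so the format string no longer depends on the width of the resource type.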
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 59fbbf12836..3aeb3279c92 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -980,28 +980,38 @@ static void lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) { unsigned long bytecnt; - pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */ - pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */ long io_count; long status; /* PDC return status */ long pa_count; + pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; /* PA_VIEW */ + pdc_pat_cell_mod_maddr_block_t *io_pdc_cell; /* IO_VIEW */ int i; + pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL); + if (!pa_pdc_cell) + return; + + io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL); + if (!io_pdc_cell) { + kfree(pa_pdc_cell); + return; + } + /* return cell module (IO view) */ status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, - PA_VIEW, & pa_pdc_cell); - pa_count = pa_pdc_cell.mod[1]; + PA_VIEW, pa_pdc_cell); + pa_count = pa_pdc_cell->mod[1]; status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, - IO_VIEW, &io_pdc_cell); - io_count = io_pdc_cell.mod[1]; + IO_VIEW, io_pdc_cell); + io_count = io_pdc_cell->mod[1]; /* We've already done this once for device discovery...*/ if (status != PDC_OK) { panic("pdc_pat_cell_module() call failed for LBA!\n"); } - if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) { + if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) { panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n"); } @@ -1016,8 +1026,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) } *p, *io; struct resource *r; - p = (void *) &(pa_pdc_cell.mod[2+i*3]); - io = (void *) &(io_pdc_cell.mod[2+i*3]); + p = (void *) &(pa_pdc_cell->mod[2+i*3]); + io = (void *) &(io_pdc_cell->mod[2+i*3]); /* Convert the PAT range data to PCI "struct resource" */ switch(p->type & 0xff) { @@ -1096,6 +1106,9 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) break; } } + + kfree(pa_pdc_cell); + kfree(io_pdc_cell); } #else /* keep compiler from complaining about missing declarations */ @@ -1509,10 +1522,6 @@ lba_driver_probe(struct parisc_device *dev) lba_bus = lba_dev->hba.hba_bus = pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start, cfg_ops, NULL); - if (lba_bus) { - lba_next_bus = lba_bus->subordinate + 1; - pci_bus_add_devices(lba_bus); - } /* This is in lieu of calling pci_assign_unassigned_resources() */ if (is_pdc_pat()) { @@ -1533,7 +1542,6 @@ lba_driver_probe(struct parisc_device *dev) } pci_enable_bridges(lba_bus); - /* ** Once PCI register ops has walked the bus, access to config ** space is restricted. Avoids master aborts on config cycles. @@ -1543,6 +1551,11 @@ lba_driver_probe(struct parisc_device *dev) lba_dev->flags |= LBA_FLAG_SKIP_PROBE; } + if (lba_bus) { + lba_next_bus = lba_bus->subordinate + 1; + pci_bus_add_devices(lba_bus); + } + /* Whew! Finally done! Tell services we got this one covered. 
*/ return 0; } diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index f9f9a5f1bbd..13a64bc081b 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -370,7 +370,7 @@ pdcspath_layer_read(struct pdcspath_entry *entry, char *buf) if (!i) /* entry is not ready */ return -ENODATA; - for (i = 0; devpath->layers[i] && (likely(i < 6)); i++) + for (i = 0; i < 6 && devpath->layers[i]; i++) out += sprintf(out, "%u ", devpath->layers[i]); out += sprintf(out, "\n"); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d46dd57450a..123d8fe3427 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -2057,6 +2057,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r) r->start = (base & ~1UL) | PCI_F_EXTEND; size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK); r->end = r->start + size; + r->flags = IORESOURCE_MEM; } } @@ -2093,4 +2094,5 @@ void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r ) size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC; r->start += rope * (size + 1); /* adjust base for this rope */ r->end = r->start + size; + r->flags = IORESOURCE_MEM; } diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 33e5ade774c..675f04e6597 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -325,7 +325,7 @@ static unsigned int superio_startup_irq(unsigned int irq) return 0; } -static struct hw_interrupt_type superio_interrupt_type = { +static struct irq_chip superio_interrupt_type = { .typename = SUPERIO, .startup = superio_startup_irq, .shutdown = superio_disable_irq, @@ -434,8 +434,8 @@ static void __init superio_parport_init(void) 0 /*base_hi*/, PAR_IRQ, PARPORT_DMA_NONE /* dma */, - NULL /*struct pci_dev* */), - 0 /* shared irq flags */ ) + NULL /*struct pci_dev* */, + 0 /* shared irq flags */)) printk(KERN_WARNING PFX "Probing parallel port failed.\n"); #endif /* CONFIG_PARPORT_PC */ diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 1032d5fdbd4..2597145a066 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards { netmos_9755, netmos_9805, netmos_9815, + netmos_9901, quatech_sppxp100, }; @@ -2987,7 +2988,7 @@ static struct parport_pc_pci { /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* netmos_9805 */ { 1, { { 0, -1 }, } }, /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, - + /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, }; @@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815, PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, + 0xA000, 0x2000, 0, 0, netmos_9901 }, /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index a5b9f6ae507..d703e73fffa 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -32,7 +32,6 @@ #include <linux/pci_hotplug.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/smp_lock.h> #include <asm/atomic.h> #include <linux/delay.h> #include <linux/kthread.h> diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c 
b/drivers/pci/hotplug/cpqphp_ctrl.c index 2fa47af992a..0ff689afa75 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -34,7 +34,6 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> -#include <linux/smp_lock.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/kthread.h> diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index 8450f4a6568..e6089bdb6e5 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c @@ -33,6 +33,7 @@ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> +#include <linux/smp_lock.h> #include <linux/debugfs.h> #include "cpqphp.h" diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 844580489d4..5c5043f239c 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name) * @slot: pointer to the &struct hotplug_slot to register * @devnr: device number * @name: name registered with kobject core + * @owner: caller module owner + * @mod_name: caller module name * * Registers a hotplug slot with the pci hotplug subsystem, which will allow * userspace interaction to the slot. diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index ff4034502d2..8aab8edf123 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> -#include <linux/smp_lock.h> #include <linux/pci.h> #include <linux/workqueue.h> #include "../pci.h" diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index a4494d78e7c..8aebe1e9d3d 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -90,11 +90,10 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = { static DEFINE_MUTEX(sn_hotplug_mutex); -static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, - char *buf) +static ssize_t path_show(struct pci_slot *pci_slot, char *buf) { int retval = -ENOENT; - struct slot *slot = bss_hotplug_slot->private; + struct slot *slot = pci_slot->hotplug->private; if (!slot) return retval; @@ -103,7 +102,7 @@ static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, return retval; } -static struct hotplug_slot_attribute sn_slot_path_attr = __ATTR_RO(path); +static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path); static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) { diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index e53eacd75c8..2314ad7ee5f 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -39,7 +39,6 @@ #include <linux/sysdev.h> #include <asm/cacheflush.h> #include <asm/iommu.h> -#include <asm/e820.h> #include "pci.h" #define ROOT_SIZE VTD_PAGE_SIZE @@ -57,14 +56,32 @@ #define MAX_AGAW_WIDTH 64 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) +#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1) #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) -#ifndef PHYSICAL_PAGE_MASK -#define PHYSICAL_PAGE_MASK PAGE_MASK -#endif + +/* VT-d pages must always be _smaller_ than MM pages. Otherwise things + are never going to work. 
*/ +static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) +{ + return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); +} + +static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) +{ + return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); +} +static inline unsigned long page_to_dma_pfn(struct page *pg) +{ + return mm_to_dma_pfn(page_to_pfn(pg)); +} +static inline unsigned long virt_to_dma_pfn(void *p) +{ + return page_to_dma_pfn(virt_to_page(p)); +} /* global iommu list, set NULL for ignored DMAR units */ static struct intel_iommu **g_iommus; @@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) static inline u64 dma_pte_addr(struct dma_pte *pte) { - return (pte->val & VTD_PAGE_MASK); +#ifdef CONFIG_64BIT + return pte->val & VTD_PAGE_MASK; +#else + /* Must have a full atomic 64-bit read */ + return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK; +#endif } -static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) +static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn) { - pte->val |= (addr & VTD_PAGE_MASK); + pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT; } static inline bool dma_pte_present(struct dma_pte *pte) @@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline int first_pte_in_page(struct dma_pte *pte) +{ + return !((unsigned long)pte & ~VTD_PAGE_MASK); +} + /* * This domain is a statically identity mapping domain. * 1. This domain creats a static 1:1 mapping to all usable memory. @@ -245,7 +272,6 @@ struct dmar_domain { struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ - spinlock_t mapping_lock; /* page table lock */ int gaw; /* max guest address width */ /* adjusted guest address width, 0 is level 2 30-bit */ @@ -649,80 +675,78 @@ static inline int width_to_agaw(int width) static inline unsigned int level_to_offset_bits(int level) { - return (12 + (level - 1) * LEVEL_STRIDE); + return (level - 1) * LEVEL_STRIDE; } -static inline int address_level_offset(u64 addr, int level) +static inline int pfn_level_offset(unsigned long pfn, int level) { - return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK); + return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; } -static inline u64 level_mask(int level) +static inline unsigned long level_mask(int level) { - return ((u64)-1 << level_to_offset_bits(level)); + return -1UL << level_to_offset_bits(level); } -static inline u64 level_size(int level) +static inline unsigned long level_size(int level) { - return ((u64)1 << level_to_offset_bits(level)); + return 1UL << level_to_offset_bits(level); } -static inline u64 align_to_level(u64 addr, int level) +static inline unsigned long align_to_level(unsigned long pfn, int level) { - return ((addr + level_size(level) - 1) & level_mask(level)); + return (pfn + level_size(level) - 1) & level_mask(level); } -static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) +static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, + unsigned long pfn) { - int addr_width = agaw_to_width(domain->agaw); + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); int offset; - unsigned long flags; BUG_ON(!domain->pgd); - - addr &= (((u64)1) << addr_width) - 1; + BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); parent = domain->pgd; - spin_lock_irqsave(&domain->mapping_lock, flags); while (level 
> 0) { void *tmp_page; - offset = address_level_offset(addr, level); + offset = pfn_level_offset(pfn, level); pte = &parent[offset]; if (level == 1) break; if (!dma_pte_present(pte)) { + uint64_t pteval; + tmp_page = alloc_pgtable_page(); - if (!tmp_page) { - spin_unlock_irqrestore(&domain->mapping_lock, - flags); + if (!tmp_page) return NULL; + + domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); + pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; + if (cmpxchg64(&pte->val, 0ULL, pteval)) { + /* Someone else set it while we were thinking; use theirs. */ + free_pgtable_page(tmp_page); + } else { + dma_pte_addr(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); } - domain_flush_cache(domain, tmp_page, PAGE_SIZE); - dma_set_pte_addr(pte, virt_to_phys(tmp_page)); - /* - * high level table always sets r/w, last level page - * table control read/write - */ - dma_set_pte_readable(pte); - dma_set_pte_writable(pte); - domain_flush_cache(domain, pte, sizeof(*pte)); } parent = phys_to_virt(dma_pte_addr(pte)); level--; } - spin_unlock_irqrestore(&domain->mapping_lock, flags); return pte; } /* return address's pte at specific level */ -static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, - int level) +static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, + unsigned long pfn, + int level) { struct dma_pte *parent, *pte = NULL; int total = agaw_to_level(domain->agaw); @@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, parent = domain->pgd; while (level <= total) { - offset = address_level_offset(addr, total); + offset = pfn_level_offset(pfn, total); pte = &parent[offset]; if (level == total) return pte; @@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, return NULL; } -/* clear one page's page table */ -static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) -{ - struct dma_pte *pte = NULL; - - /* get last level pte */ - pte = dma_addr_level_pte(domain, addr, 1); - - if (pte) { - dma_clear_pte(pte); - domain_flush_cache(domain, pte, sizeof(*pte)); - } -} - /* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) +static void dma_pte_clear_range(struct dmar_domain *domain, + unsigned long start_pfn, + unsigned long last_pfn) { - int addr_width = agaw_to_width(domain->agaw); - int npages; + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + struct dma_pte *first_pte, *pte; - start &= (((u64)1) << addr_width) - 1; - end &= (((u64)1) << addr_width) - 1; - /* in case it's partial page */ - start &= PAGE_MASK; - end = PAGE_ALIGN(end); - npages = (end - start) / VTD_PAGE_SIZE; + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); - /* we don't need lock here, nobody else touches the iova range */ - while (npages--) { - dma_pte_clear_one(domain, start); - start += VTD_PAGE_SIZE; + /* we don't need lock here; nobody else touches the iova range */ + while (start_pfn <= last_pfn) { + first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); + if (!pte) { + start_pfn = align_to_level(start_pfn + 1, 2); + continue; + } + do { + dma_clear_pte(pte); + start_pfn++; + pte++; + } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); + + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); } } /* free page table pages. 
last level pte should already be cleared */ static void dma_pte_free_pagetable(struct dmar_domain *domain, - u64 start, u64 end) + unsigned long start_pfn, + unsigned long last_pfn) { - int addr_width = agaw_to_width(domain->agaw); - struct dma_pte *pte; + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + struct dma_pte *first_pte, *pte; int total = agaw_to_level(domain->agaw); int level; - u64 tmp; + unsigned long tmp; - start &= (((u64)1) << addr_width) - 1; - end &= (((u64)1) << addr_width) - 1; + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); - /* we don't need lock here, nobody else touches the iova range */ + /* We don't need lock here; nobody else touches the iova range */ level = 2; while (level <= total) { - tmp = align_to_level(start, level); - if (tmp >= end || (tmp + level_size(level) > end)) + tmp = align_to_level(start_pfn, level); + + /* If we can't even clear one PTE at this level, we're done */ + if (tmp + level_size(level) - 1 > last_pfn) return; - while (tmp < end) { - pte = dma_addr_level_pte(domain, tmp, level); - if (pte) { - free_pgtable_page( - phys_to_virt(dma_pte_addr(pte))); - dma_clear_pte(pte); - domain_flush_cache(domain, pte, sizeof(*pte)); + while (tmp + level_size(level) - 1 <= last_pfn) { + first_pte = pte = dma_pfn_level_pte(domain, tmp, level); + if (!pte) { + tmp = align_to_level(tmp + 1, level + 1); + continue; } - tmp += level_size(level); + do { + if (dma_pte_present(pte)) { + free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); + dma_clear_pte(pte); + } + pte++; + tmp += level_size(level); + } while (!first_pte_in_page(pte) && + tmp + level_size(level) - 1 <= last_pfn); + + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); + } level++; } /* free pgd */ - if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) { + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { free_pgtable_page(domain->pgd); domain->pgd = NULL; } @@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, } static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, - u64 addr, unsigned int pages) + unsigned long pfn, unsigned int pages) { unsigned int mask = ilog2(__roundup_pow_of_two(pages)); + uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; - BUG_ON(addr & (~VTD_PAGE_MASK)); BUG_ON(pages == 0); /* @@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, else iommu->flush.flush_iotlb(iommu, did, addr, mask, DMA_TLB_PSI_FLUSH); - if (did) + + /* + * In caching mode, domain ID 0 is reserved for non-present to present + * mapping flush. Device IOTLB doesn't need to be flushed in this case. 
+ */ + if (!cap_caching_mode(iommu->cap) || did) iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); } @@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void) struct pci_dev *pdev = NULL; struct iova *iova; int i; - u64 addr, size; init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); @@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void) r = &pdev->resource[i]; if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; - addr = r->start; - addr &= PHYSICAL_PAGE_MASK; - size = r->end - addr; - size = PAGE_ALIGN(size); - iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), - IOVA_PFN(size + addr) - 1); + iova = reserve_iova(&reserved_iova_list, + IOVA_PFN(r->start), + IOVA_PFN(r->end)); if (!iova) printk(KERN_ERR "Reserve iova failed\n"); } @@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width) unsigned long sagaw; init_iova_domain(&domain->iovad, DMA_32BIT_PFN); - spin_lock_init(&domain->mapping_lock); spin_lock_init(&domain->iommu_lock); domain_reserve_special_ranges(domain); @@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; - u64 end; /* Domain 0 is reserved, so dont process it */ if (!domain) @@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain) domain_remove_dev_info(domain); /* destroy iovas */ put_iova_domain(&domain->iovad); - end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~PAGE_MASK); /* clear ptes */ - dma_pte_clear_range(domain, 0, end); + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); /* free page tables */ - dma_pte_free_pagetable(domain, 0, end); + dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); for_each_active_iommu(iommu, drhd) if (test_bit(iommu->seq_id, &domain->iommu_bmp)) @@ -1476,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, } set_bit(num, iommu->domain_ids); - set_bit(iommu->seq_id, &domain->iommu_bmp); iommu->domains[num] = domain; id = num; } @@ -1619,42 +1647,94 @@ static int domain_context_mapped(struct pci_dev *pdev) tmp->devfn); } -static int -domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, - u64 hpa, size_t size, int prot) +/* Returns a number of VTD pages, but aligned to MM page size */ +static inline unsigned long aligned_nrpages(unsigned long host_addr, + size_t size) { - u64 start_pfn, end_pfn; - struct dma_pte *pte; - int index; - int addr_width = agaw_to_width(domain->agaw); + host_addr &= ~PAGE_MASK; + return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; +} - hpa &= (((u64)1) << addr_width) - 1; +static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + struct scatterlist *sg, unsigned long phys_pfn, + unsigned long nr_pages, int prot) +{ + struct dma_pte *first_pte = NULL, *pte = NULL; + phys_addr_t uninitialized_var(pteval); + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + unsigned long sg_res; + + BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; - iova &= PAGE_MASK; - start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; - end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; - index = 0; - while (start_pfn < end_pfn) { - pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); - if (!pte) - return -ENOMEM; + + prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; + + if (sg) + sg_res = 0; + else { + sg_res = nr_pages + 1; + pteval = ((phys_addr_t)phys_pfn << 
VTD_PAGE_SHIFT) | prot; + } + + while (nr_pages--) { + uint64_t tmp; + + if (!sg_res) { + sg_res = aligned_nrpages(sg->offset, sg->length); + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; + sg->dma_length = sg->length; + pteval = page_to_phys(sg_page(sg)) | prot; + } + if (!pte) { + first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); + if (!pte) + return -ENOMEM; + } /* We don't need lock here, nobody else * touches the iova range */ - BUG_ON(dma_pte_addr(pte)); - dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); - dma_set_pte_prot(pte, prot); - if (prot & DMA_PTE_SNP) - dma_set_pte_snp(pte); - domain_flush_cache(domain, pte, sizeof(*pte)); - start_pfn++; - index++; + tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); + if (tmp) { + static int dumps = 5; + printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", + iov_pfn, tmp, (unsigned long long)pteval); + if (dumps) { + dumps--; + debug_dma_dump_mappings(NULL); + } + WARN_ON(1); + } + pte++; + if (!nr_pages || first_pte_in_page(pte)) { + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); + pte = NULL; + } + iov_pfn++; + pteval += VTD_PAGE_SIZE; + sg_res--; + if (!sg_res) + sg = sg_next(sg); } return 0; } +static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + struct scatterlist *sg, unsigned long nr_pages, + int prot) +{ + return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); +} + +static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + unsigned long phys_pfn, unsigned long nr_pages, + int prot) +{ + return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); +} + static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) { if (!iommu) @@ -1845,58 +1925,61 @@ error: static int iommu_identity_mapping; +static int iommu_domain_identity_map(struct dmar_domain *domain, + unsigned long long start, + unsigned long long end) +{ + unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; + unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; + + if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), + dma_to_mm_pfn(last_vpfn))) { + printk(KERN_ERR "IOMMU: reserve iova failed\n"); + return -ENOMEM; + } + + pr_debug("Mapping reserved region %llx-%llx for domain %d\n", + start, end, domain->id); + /* + * RMRR range might have overlap with physical memory range, + * clear it first + */ + dma_pte_clear_range(domain, first_vpfn, last_vpfn); + + return domain_pfn_mapping(domain, first_vpfn, first_vpfn, + last_vpfn - first_vpfn + 1, + DMA_PTE_READ|DMA_PTE_WRITE); +} + static int iommu_prepare_identity_map(struct pci_dev *pdev, unsigned long long start, unsigned long long end) { struct dmar_domain *domain; - unsigned long size; - unsigned long long base; int ret; printk(KERN_INFO - "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - if (iommu_identity_mapping) - domain = si_domain; - else - /* page table init */ - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); + "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + + domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) return -ENOMEM; - /* The address might not be aligned */ - base = start & PAGE_MASK; - size = end - base; - size = PAGE_ALIGN(size); - if (!reserve_iova(&domain->iovad, IOVA_PFN(base), - IOVA_PFN(base + size) - 1)) { - printk(KERN_ERR "IOMMU: reserve iova failed\n"); - ret = -ENOMEM; - 
goto error; - } - - pr_debug("Mapping reserved region %lx@%llx for %s\n", - size, base, pci_name(pdev)); - /* - * RMRR range might have overlap with physical memory range, - * clear it first - */ - dma_pte_clear_range(domain, base, base + size); - - ret = domain_page_mapping(domain, base, base, size, - DMA_PTE_READ|DMA_PTE_WRITE); + ret = iommu_domain_identity_map(domain, start, end); if (ret) goto error; /* context entry init */ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); - if (!ret) - return 0; -error: + if (ret) + goto error; + + return 0; + + error: domain_exit(domain); return ret; - } static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, @@ -1908,64 +1991,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, rmrr->end_address + 1); } -#ifdef CONFIG_DMAR_GFX_WA -struct iommu_prepare_data { - struct pci_dev *pdev; - int ret; -}; - -static int __init iommu_prepare_work_fn(unsigned long start_pfn, - unsigned long end_pfn, void *datax) -{ - struct iommu_prepare_data *data; - - data = (struct iommu_prepare_data *)datax; - - data->ret = iommu_prepare_identity_map(data->pdev, - start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); - return data->ret; - -} - -static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev) -{ - int nid; - struct iommu_prepare_data data; - - data.pdev = pdev; - data.ret = 0; - - for_each_online_node(nid) { - work_with_active_regions(nid, iommu_prepare_work_fn, &data); - if (data.ret) - return data.ret; - } - return data.ret; -} - -static void __init iommu_prepare_gfx_mapping(void) -{ - struct pci_dev *pdev = NULL; - int ret; - - for_each_pci_dev(pdev) { - if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO || - !IS_GFX_DEVICE(pdev)) - continue; - printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", - pci_name(pdev)); - ret = iommu_prepare_with_active_regions(pdev); - if (ret) - printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); - } -} -#else /* !CONFIG_DMAR_GFX_WA */ -static inline void iommu_prepare_gfx_mapping(void) -{ - return; -} -#endif - #ifdef CONFIG_DMAR_FLOPPY_WA static inline void iommu_prepare_isa(void) { @@ -1976,12 +2001,12 @@ static inline void iommu_prepare_isa(void) if (!pdev) return; - printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); + printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); if (ret) - printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " - "floppy might not work\n"); + printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " + "floppy might not work\n"); } #else @@ -2009,16 +2034,30 @@ static int __init init_context_pass_through(void) } static int md_domain_init(struct dmar_domain *domain, int guest_width); + +static int __init si_domain_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + int *ret = datax; + + *ret = iommu_domain_identity_map(si_domain, + (uint64_t)start_pfn << PAGE_SHIFT, + (uint64_t)end_pfn << PAGE_SHIFT); + return *ret; + +} + static int si_domain_init(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; - int ret = 0; + int nid, ret = 0; si_domain = alloc_domain(); if (!si_domain) return -EFAULT; + pr_debug("Identity mapping domain is domain %d\n", si_domain->id); for_each_active_iommu(iommu, drhd) { ret = iommu_attach_domain(si_domain, iommu); @@ -2035,6 +2074,12 @@ static int si_domain_init(void) si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; + for_each_online_node(nid) { + 
work_with_active_regions(nid, si_domain_work_fn, &ret); + if (ret) + return ret; + } + return 0; } @@ -2079,9 +2124,49 @@ static int domain_add_dev_info(struct dmar_domain *domain, return 0; } +static int iommu_should_identity_map(struct pci_dev *pdev, int startup) +{ + if (iommu_identity_mapping == 2) + return IS_GFX_DEVICE(pdev); + + /* + * We want to start off with all devices in the 1:1 domain, and + * take them out later if we find they can't access all of memory. + * + * However, we can't do this for PCI devices behind bridges, + * because all PCI devices behind the same bridge will end up + * with the same source-id on their transactions. + * + * Practically speaking, we can't change things around for these + * devices at run-time, because we can't be sure there'll be no + * DMA transactions in flight for any of their siblings. + * + * So PCI devices (unless they're on the root bus) as well as + * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of + * the 1:1 domain, just in _case_ one of their siblings turns out + * not to be able to map all of memory. + */ + if (!pdev->is_pcie) { + if (!pci_is_root_bus(pdev->bus)) + return 0; + if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) + return 0; + } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + return 0; + + /* + * At boot time, we don't yet know if devices will be 64-bit capable. + * Assume that they will -- if they turn out not to be, then we can + * take them out of the 1:1 domain later. + */ + if (!startup) + return pdev->dma_mask > DMA_BIT_MASK(32); + + return 1; +} + static int iommu_prepare_static_identity_mapping(void) { - int i; struct pci_dev *pdev = NULL; int ret; @@ -2089,23 +2174,19 @@ static int iommu_prepare_static_identity_mapping(void) if (ret) return -EFAULT; - printk(KERN_INFO "IOMMU: Setting identity map:\n"); for_each_pci_dev(pdev) { - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - - if (ei->type == E820_RAM) { - ret = iommu_prepare_identity_map(pdev, - ei->addr, ei->addr + ei->size); - if (ret) { - printk(KERN_INFO "1:1 mapping to one domain failed.\n"); - return -EFAULT; - } - } + if (iommu_should_identity_map(pdev, 1)) { + printk(KERN_INFO "IOMMU: identity mapping for device %s\n", + pci_name(pdev)); + + ret = domain_context_mapping(si_domain, pdev, + CONTEXT_TT_MULTI_LEVEL); + if (ret) + return ret; + ret = domain_add_dev_info(si_domain, pdev); + if (ret) + return ret; } - ret = domain_add_dev_info(si_domain, pdev); - if (ret) - return ret; } return 0; @@ -2260,6 +2341,10 @@ int __init init_dmars(void) * identity mapping if iommu_identity_mapping is set. 
*/ if (!iommu_pass_through) { +#ifdef CONFIG_DMAR_BROKEN_GFX_WA + if (!iommu_identity_mapping) + iommu_identity_mapping = 2; +#endif if (iommu_identity_mapping) iommu_prepare_static_identity_mapping(); /* @@ -2293,8 +2378,6 @@ int __init init_dmars(void) } } - iommu_prepare_gfx_mapping(); - iommu_prepare_isa(); } @@ -2339,50 +2422,32 @@ error: return ret; } -static inline u64 aligned_size(u64 host_addr, size_t size) -{ - u64 addr; - addr = (host_addr & (~PAGE_MASK)) + size; - return PAGE_ALIGN(addr); -} - -struct iova * -iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) -{ - struct iova *piova; - - /* Make sure it's in range */ - end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end); - if (!size || (IOVA_START_ADDR + size > end)) - return NULL; - - piova = alloc_iova(&domain->iovad, - size >> PAGE_SHIFT, IOVA_PFN(end), 1); - return piova; -} - -static struct iova * -__intel_alloc_iova(struct device *dev, struct dmar_domain *domain, - size_t size, u64 dma_mask) +/* This takes a number of _MM_ pages, not VTD pages */ +static struct iova *intel_alloc_iova(struct device *dev, + struct dmar_domain *domain, + unsigned long nrpages, uint64_t dma_mask) { struct pci_dev *pdev = to_pci_dev(dev); struct iova *iova = NULL; - if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac) - iova = iommu_alloc_iova(domain, size, dma_mask); - else { + /* Restrict dma_mask to the width that the iommu can handle */ + dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + + if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* * First try to allocate an io virtual address in * DMA_BIT_MASK(32) and if that fails then try allocating * from higher range */ - iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32)); - if (!iova) - iova = iommu_alloc_iova(domain, size, dma_mask); - } - - if (!iova) { - printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev)); + iova = alloc_iova(&domain->iovad, nrpages, + IOVA_PFN(DMA_BIT_MASK(32)), 1); + if (iova) + return iova; + } + iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); + if (unlikely(!iova)) { + printk(KERN_ERR "Allocating %ld-page iova for %s failed", + nrpages, pci_name(pdev)); return NULL; } @@ -2424,16 +2489,24 @@ static int iommu_dummy(struct pci_dev *pdev) } /* Check if the pdev needs to go through non-identity map and unmap process.*/ -static int iommu_no_mapping(struct pci_dev *pdev) +static int iommu_no_mapping(struct device *dev) { + struct pci_dev *pdev; int found; + if (unlikely(dev->bus != &pci_bus_type)) + return 1; + + pdev = to_pci_dev(dev); + if (iommu_dummy(pdev)) + return 1; + if (!iommu_identity_mapping) - return iommu_dummy(pdev); + return 0; found = identity_mapping(pdev); if (found) { - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (iommu_should_identity_map(pdev, 0)) return 1; else { /* @@ -2450,9 +2523,12 @@ static int iommu_no_mapping(struct pci_dev *pdev) * In case of a detached 64 bit DMA device from vm, the device * is put into si_domain for identity mapping. 
*/ - if (pdev->dma_mask > DMA_BIT_MASK(32)) { + if (iommu_should_identity_map(pdev, 0)) { int ret; ret = domain_add_dev_info(si_domain, pdev); + if (ret) + return 0; + ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL); if (!ret) { printk(KERN_INFO "64bit %s uses identity mapping\n", pci_name(pdev)); @@ -2461,7 +2537,7 @@ static int iommu_no_mapping(struct pci_dev *pdev) } } - return iommu_dummy(pdev); + return 0; } static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, @@ -2474,10 +2550,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, int prot = 0; int ret; struct intel_iommu *iommu; + unsigned long paddr_pfn = paddr >> PAGE_SHIFT; BUG_ON(dir == DMA_NONE); - if (iommu_no_mapping(pdev)) + if (iommu_no_mapping(hwdev)) return paddr; domain = get_valid_domain_for_dev(pdev); @@ -2485,14 +2562,13 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, return 0; iommu = domain_get_iommu(domain); - size = aligned_size((u64)paddr, size); + size = aligned_nrpages(paddr, size); - iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); + iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), + pdev->dma_mask); if (!iova) goto error; - start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; - /* * Check if DMAR supports zero-length reads on write only * mappings.. @@ -2508,20 +2584,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, * might have two guest_addr mapping to the same host paddr, but this * is not a big problem */ - ret = domain_page_mapping(domain, start_paddr, - ((u64)paddr) & PHYSICAL_PAGE_MASK, - size, prot); + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), + mm_to_dma_pfn(paddr_pfn), size, prot); if (ret) goto error; /* it's a non-present to present mapping. 
Only flush if caching mode */ if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, 0, start_paddr, - size >> VTD_PAGE_SHIFT); + iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size); else iommu_flush_write_buffer(iommu); - return start_paddr + ((u64)paddr & (~PAGE_MASK)); + start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; + start_paddr += paddr & ~PAGE_MASK; + return start_paddr; error: if (iova) @@ -2614,11 +2690,11 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; - unsigned long start_addr; + unsigned long start_pfn, last_pfn; struct iova *iova; struct intel_iommu *iommu; - if (iommu_no_mapping(pdev)) + if (iommu_no_mapping(dev)) return; domain = find_domain(pdev); @@ -2627,22 +2703,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, iommu = domain_get_iommu(domain); iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); - if (!iova) + if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", + (unsigned long long)dev_addr)) return; - start_addr = iova->pfn_lo << PAGE_SHIFT; - size = aligned_size((u64)dev_addr, size); + start_pfn = mm_to_dma_pfn(iova->pfn_lo); + last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; - pr_debug("Device %s unmapping: %zx@%llx\n", - pci_name(pdev), size, (unsigned long long)start_addr); + pr_debug("Device %s unmapping: pfn %lx-%lx\n", + pci_name(pdev), start_pfn, last_pfn); /* clear the whole page */ - dma_pte_clear_range(domain, start_addr, start_addr + size); + dma_pte_clear_range(domain, start_pfn, last_pfn); + /* free page tables */ - dma_pte_free_pagetable(domain, start_addr, start_addr + size); + dma_pte_free_pagetable(domain, start_pfn, last_pfn); + if (intel_iommu_strict) { - iommu_flush_iotlb_psi(iommu, domain->id, start_addr, - size >> VTD_PAGE_SHIFT); + iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, + last_pfn - start_pfn + 1); /* free iova */ __free_iova(&domain->iovad, iova); } else { @@ -2700,17 +2779,13 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - int i; struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; - unsigned long start_addr; + unsigned long start_pfn, last_pfn; struct iova *iova; - size_t size = 0; - phys_addr_t addr; - struct scatterlist *sg; struct intel_iommu *iommu; - if (iommu_no_mapping(pdev)) + if (iommu_no_mapping(hwdev)) return; domain = find_domain(pdev); @@ -2719,22 +2794,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, iommu = domain_get_iommu(domain); iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); - if (!iova) + if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", + (unsigned long long)sglist[0].dma_address)) return; - for_each_sg(sglist, sg, nelems, i) { - addr = page_to_phys(sg_page(sg)) + sg->offset; - size += aligned_size((u64)addr, sg->length); - } - start_addr = iova->pfn_lo << PAGE_SHIFT; + start_pfn = mm_to_dma_pfn(iova->pfn_lo); + last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; /* clear the whole page */ - dma_pte_clear_range(domain, start_addr, start_addr + size); + dma_pte_clear_range(domain, start_pfn, last_pfn); + /* free page tables */ - dma_pte_free_pagetable(domain, start_addr, start_addr + size); + dma_pte_free_pagetable(domain, start_pfn, last_pfn); - iommu_flush_iotlb_psi(iommu, domain->id, start_addr, - size >> VTD_PAGE_SHIFT); + iommu_flush_iotlb_psi(iommu, 
domain->id, start_pfn, + (last_pfn - start_pfn + 1)); /* free iova */ __free_iova(&domain->iovad, iova); @@ -2757,21 +2831,20 @@ static int intel_nontranslate_map_sg(struct device *hddev, static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - phys_addr_t addr; int i; struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; size_t size = 0; int prot = 0; - size_t offset = 0; + size_t offset_pfn = 0; struct iova *iova = NULL; int ret; struct scatterlist *sg; - unsigned long start_addr; + unsigned long start_vpfn; struct intel_iommu *iommu; BUG_ON(dir == DMA_NONE); - if (iommu_no_mapping(pdev)) + if (iommu_no_mapping(hwdev)) return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); domain = get_valid_domain_for_dev(pdev); @@ -2780,12 +2853,11 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne iommu = domain_get_iommu(domain); - for_each_sg(sglist, sg, nelems, i) { - addr = page_to_phys(sg_page(sg)) + sg->offset; - size += aligned_size((u64)addr, sg->length); - } + for_each_sg(sglist, sg, nelems, i) + size += aligned_nrpages(sg->offset, sg->length); - iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); + iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), + pdev->dma_mask); if (!iova) { sglist->dma_length = 0; return 0; @@ -2801,35 +2873,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; - start_addr = iova->pfn_lo << PAGE_SHIFT; - offset = 0; - for_each_sg(sglist, sg, nelems, i) { - addr = page_to_phys(sg_page(sg)) + sg->offset; - size = aligned_size((u64)addr, sg->length); - ret = domain_page_mapping(domain, start_addr + offset, - ((u64)addr) & PHYSICAL_PAGE_MASK, - size, prot); - if (ret) { - /* clear the page */ - dma_pte_clear_range(domain, start_addr, - start_addr + offset); - /* free page tables */ - dma_pte_free_pagetable(domain, start_addr, - start_addr + offset); - /* free iova */ - __free_iova(&domain->iovad, iova); - return 0; - } - sg->dma_address = start_addr + offset + - ((u64)addr & (~PAGE_MASK)); - sg->dma_length = sg->length; - offset += size; + start_vpfn = mm_to_dma_pfn(iova->pfn_lo); + + ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); + if (unlikely(ret)) { + /* clear the page */ + dma_pte_clear_range(domain, start_vpfn, + start_vpfn + size - 1); + /* free page tables */ + dma_pte_free_pagetable(domain, start_vpfn, + start_vpfn + size - 1); + /* free iova */ + __free_iova(&domain->iovad, iova); + return 0; } /* it's a non-present to present mapping. 
Only flush if caching mode */ if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, 0, start_addr, - offset >> VTD_PAGE_SHIFT); + iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn); else iommu_flush_write_buffer(iommu); @@ -3334,7 +3395,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) int adjust_width; init_iova_domain(&domain->iovad, DMA_32BIT_PFN); - spin_lock_init(&domain->mapping_lock); spin_lock_init(&domain->iommu_lock); domain_reserve_special_ranges(domain); @@ -3348,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) domain->iommu_count = 0; domain->iommu_coherency = 0; + domain->iommu_snooping = 0; domain->max_addr = 0; /* always allocate the top pgd */ @@ -3388,8 +3449,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain) static void vm_domain_exit(struct dmar_domain *domain) { - u64 end; - /* Domain 0 is reserved, so dont process it */ if (!domain) return; @@ -3397,14 +3456,12 @@ static void vm_domain_exit(struct dmar_domain *domain) vm_domain_remove_all_dev_info(domain); /* destroy iovas */ put_iova_domain(&domain->iovad); - end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~VTD_PAGE_MASK); /* clear ptes */ - dma_pte_clear_range(domain, 0, end); + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); /* free page tables */ - dma_pte_free_pagetable(domain, 0, end); + dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); iommu_free_vm_domain(domain); free_domain_mem(domain); @@ -3513,7 +3570,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain, if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) prot |= DMA_PTE_SNP; - max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); + max_addr = iova + size; if (dmar_domain->max_addr < max_addr) { int min_agaw; u64 end; @@ -3531,8 +3588,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain, } dmar_domain->max_addr = max_addr; } - - ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); + /* Round up size to next multiple of PAGE_SIZE, if it and + the low bits of hpa would take us onto the next page */ + size = aligned_nrpages(hpa, size); + ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, + hpa >> VTD_PAGE_SHIFT, size, prot); return ret; } @@ -3540,15 +3600,15 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, size_t size) { struct dmar_domain *dmar_domain = domain->priv; - dma_addr_t base; - /* The address might not be aligned */ - base = iova & VTD_PAGE_MASK; - size = VTD_PAGE_ALIGN(size); - dma_pte_clear_range(dmar_domain, base, base + size); + if (!size) + return; + + dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, + (iova + size - 1) >> VTD_PAGE_SHIFT); - if (dmar_domain->max_addr == base + size) - dmar_domain->max_addr = base; + if (dmar_domain->max_addr == iova + size) + dmar_domain->max_addr = iova; } static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, @@ -3558,7 +3618,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, struct dma_pte *pte; u64 phys = 0; - pte = addr_to_dma_pte(dmar_domain, iova); + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); if (pte) phys = dma_pte_addr(pte); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index e3a87210e94..e03fe98f061 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, } /** + * pci_sriov_resource_alignment - get resource alignment for VF BAR + * 
@dev: the PCI device + * @resno: the resource number + * + * Returns the alignment of the VF BAR found in the SR-IOV capability. + * This is not the same as the resource size which is defined as + * the VF BAR size multiplied by the number of VFs. The alignment + * is just the VF BAR size. + */ +int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) +{ + struct resource tmp; + enum pci_bar_type type; + int reg = pci_iov_resource_bar(dev, resno, &type); + + if (!reg) + return 0; + + __pci_read_base(dev, type, &tmp, reg); + return resource_alignment(&tmp); +} + +/** * pci_restore_iov_state - restore the state of the IOV capability * @dev: the PCI device */ diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 2287116e982..46dd440e231 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c @@ -1,9 +1,19 @@ /* - * Copyright (c) 2006, Intel Corporation. + * Copyright © 2006-2009, Intel Corporation. * - * This file is released under the GPLv2. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. * - * Copyright (C) 2006-2008 Intel Corporation * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ @@ -123,7 +133,15 @@ move_left: /* Insert the new_iova into domain rbtree by holding writer lock */ /* Add new node and rebalance tree. */ { - struct rb_node **entry = &((prev)), *parent = NULL; + struct rb_node **entry, *parent = NULL; + + /* If we have 'prev', it's a valid place to start the + insertion. Otherwise, start from the root. */ + if (prev) + entry = &prev; + else + entry = &iovad->rbroot.rb_node; + /* Figure out where to put new node */ while (*entry) { struct iova *this = container_of(*entry, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d9f06fbfa0b..d986afb7032 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control) * reliably as devices without an INTx disable bit will then generate a * level IRQ which will never be cleared. */ -static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { u32 mask_bits = desc->masked; if (!desc->msi_attrib.maskbit) - return; + return 0; mask_bits &= ~mask; mask_bits |= flag; pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); - desc->masked = mask_bits; + + return mask_bits; +} + +static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +{ + desc->masked = __msi_mask_irq(desc, mask, flag); } /* @@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) * file. This saves a few milliseconds when initialising devices with lots * of MSI-X interrupts. 
*/ -static void msix_mask_irq(struct msi_desc *desc, u32 flag) +static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) { u32 mask_bits = desc->masked; unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; + PCI_MSIX_ENTRY_VECTOR_CTRL; mask_bits &= ~1; mask_bits |= flag; writel(mask_bits, desc->mask_base + offset); - desc->masked = mask_bits; + + return mask_bits; +} + +static void msix_mask_irq(struct msi_desc *desc, u32 flag) +{ + desc->masked = __msix_mask_irq(desc, flag); } static void msi_set_mask_bit(unsigned irq, u32 flag) @@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void __iomem *base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; - msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); + msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); + msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); + msg->data = readl(base + PCI_MSIX_ENTRY_DATA); } else { struct pci_dev *dev = entry->dev; int pos = entry->msi_attrib.pos; @@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; - writel(msg->address_lo, - base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - writel(msg->address_hi, - base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msg->data, base + PCI_MSIX_ENTRY_DATA); } else { struct pci_dev *dev = entry->dev; int pos = entry->msi_attrib.pos; @@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) /* Configure MSI capability structure */ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) { + msi_mask_irq(entry, mask, ~mask); msi_free_irqs(dev); return ret; } @@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev, for (i = 0; i < nvec; i++) { entry = alloc_msi_entry(dev); - if (!entry) - break; + if (!entry) { + if (!i) + iounmap(base); + else + msi_free_irqs(dev); + /* No enough memory. 
Don't try again */ + return -ENOMEM; + } j = entries[i].entry; entry->msi_attrib.is_msix = 1; @@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev, set_irq_msi(entry->irq, entry); j = entries[i].entry; entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); + PCI_MSIX_ENTRY_VECTOR_CTRL); msix_mask_irq(entry, 1); i++; } @@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev) pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; + /* Return the device with MSI unmasked as initial states */ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl); mask = msi_capable_mask(ctrl); - msi_mask_irq(desc, mask, ~mask); + /* Keep cached state to be restored */ + __msi_mask_irq(desc, mask, ~mask); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = desc->msi_attrib.default_irq; @@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev) list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { if (entry->msi_attrib.is_msix) { - msix_mask_irq(entry, 1); if (list_is_last(&entry->list, &dev->msi_list)) iounmap(entry->mask_base); } @@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev) void pci_msix_shutdown(struct pci_dev* dev) { + struct msi_desc *entry; + if (!pci_msi_enable || !dev || !dev->msix_enabled) return; + /* Return the device with MSI-X masked as initial states */ + list_for_each_entry(entry, &dev->msi_list, list) { + /* Keep cached states to be restored */ + __msix_mask_irq(entry, 1); + } + msix_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h index a0662842550..de27c1cb5a2 100644 --- a/drivers/pci/msi.h +++ b/drivers/pci/msi.h @@ -6,11 +6,11 @@ #ifndef MSI_H #define MSI_H -#define PCI_MSIX_ENTRY_SIZE 16 -#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0 -#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4 -#define PCI_MSIX_ENTRY_DATA_OFFSET 8 -#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12 +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 +#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 #define msi_control_reg(base) (base + PCI_MSI_FLAGS) #define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d76c4c85367..f99bc7f089f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -508,7 +508,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) return error; } - return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0; + return pci_restore_state(pci_dev); } static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6c93af5ced1..7b70312181d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -846,6 +846,8 @@ pci_restore_state(struct pci_dev *dev) int i; u32 val; + if (!dev->state_saved) + return 0; /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); @@ -1517,11 +1519,20 @@ void pci_enable_ari(struct pci_dev *dev) * * Perform INTx swizzling for a device behind one level of bridge. This is * required by section 9.1 of the PCI-to-PCI bridge specification for devices - * behind bridges on add-in cards. + * behind bridges on add-in cards. 
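The renamed msi.h constants near the end of this hunk describe the MSI-X table layout that the readl()/writel() calls rely on: each vector owns a 16-byte entry with the low address, high address, data and vector-control dwords at offsets 0, 4, 8 and 12. A quick check of the offset arithmetic (entry number 3 is an arbitrary example):

#include <stdio.h>

#define ENTRY_SIZE        16  /* PCI_MSIX_ENTRY_SIZE */
#define ENTRY_LOWER_ADDR   0
#define ENTRY_UPPER_ADDR   4
#define ENTRY_DATA         8
#define ENTRY_VECTOR_CTRL 12

int main(void)
{
        unsigned int entry_nr = 3;   /* arbitrary vector number */
        unsigned long base = entry_nr * ENTRY_SIZE;

        printf("entry %u: addr_lo@%lu addr_hi@%lu data@%lu ctrl@%lu\n",
               entry_nr,
               base + ENTRY_LOWER_ADDR, base + ENTRY_UPPER_ADDR,
               base + ENTRY_DATA, base + ENTRY_VECTOR_CTRL);
        return 0;
}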
For devices with ARI enabled, the slot + * number is always 0 (see the Implementation Note in section 2.2.8.1 of + * the PCI Express Base Specification, Revision 2.1) */ u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) { - return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; + int slot; + + if (pci_ari_enabled(dev->bus)) + slot = 0; + else + slot = PCI_SLOT(dev->devfn); + + return (((pin - 1) + slot) % 4) + 1; } int @@ -2171,7 +2182,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) u16 ctrl; struct pci_dev *pdev; - if (dev->subordinate) + if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f73bcbedf37..5ff4d25bf0e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev); extern void pci_iov_release(struct pci_dev *dev); extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); +extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); extern void pci_restore_iov_state(struct pci_dev *dev); extern int pci_iov_bus_range(struct pci_bus *bus); @@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev) } #endif /* CONFIG_PCI_IOV */ +static inline int pci_resource_alignment(struct pci_dev *dev, + struct resource *res) +{ +#ifdef CONFIG_PCI_IOV + int resno = res - dev->resource; + + if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) + return pci_sriov_resource_alignment(dev, resno); +#endif + return resource_alignment(res); +} + #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c index ece97df4df6..a928d8ab6bd 100644 --- a/drivers/pci/pcie/aer/ecrc.c +++ b/drivers/pci/pcie/aer/ecrc.c @@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev) disable_ecrc_checking(dev); break; case ECRC_POLICY_ON: - enable_ecrc_checking(dev);; + enable_ecrc_checking(dev); break; default: return; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 56552d74abe..06b96562396 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3); +/* ALi loses some register settings that we cannot then restore */ +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3); +/* VIA comes back fine but we need to keep it alive or ACPI GTM failures + occur when mode detecting */ +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3); /* This was originally an Alpha specific thing, but it really fits here. * The i82375 PCI/EISA bridge appears as non-classified. Fix that. 
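To make the swizzle above concrete: a function using INTB (pin 2) in slot 3 behind one conventional bridge is routed to INTA at the parent, ((2 - 1) + 3) % 4 + 1 = 1, while an ARI device always contributes slot 0 and so keeps its own pin. A standalone check of that arithmetic (the slot and pin values are made up for the example):

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the formula in pci_swizzle_interrupt_pin(). */
static unsigned swizzle(unsigned pin, unsigned slot, bool ari)
{
        if (ari)
                slot = 0;      /* ARI devices always use slot 0 */
        return ((pin - 1) + slot) % 4 + 1;
}

int main(void)
{
        printf("slot 3, INTB -> INT%c at the bridge\n",
               'A' + swizzle(2, 3, false) - 1);    /* prints INTA */
        printf("ARI dev, INTB -> INT%c at the bridge\n",
               'A' + swizzle(2, 0, true) - 1);     /* prints INTB */
        return 0;
}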
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index b636e245445..7c443b4583a 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -25,7 +25,7 @@ #include <linux/ioport.h> #include <linux/cache.h> #include <linux/slab.h> - +#include "pci.h" static void pbus_assign_resources_sorted(const struct pci_bus *bus) { @@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long continue; r_size = resource_size(r); /* For bridges size != alignment */ - align = resource_alignment(r); + align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; if (order > 11) { dev_warn(&dev->dev, "BAR %d bad alignment %llx: " diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index b711fb7181e..88cdd1a937d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; struct resource *root; - char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; int err; root = pci_find_parent_resource(dev, res); err = -EINVAL; if (root != NULL) - err = insert_resource(root, res); + err = request_resource(root, res); if (err) { + const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", resource, root ? "address space collision on" : @@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, size = resource_size(res); min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; - align = resource_alignment(res); + align = pci_resource_alignment(dev, res); /* First, try exact prefetching match.. */ ret = pci_bus_alloc_resource(bus, res, size, align, min, @@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) struct pci_bus *bus; int ret; - align = resource_alignment(res); + align = pci_resource_alignment(dev, res); if (!align) { dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " "alignment) %pR flags %#lx\n", @@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) if (!(r->flags) || r->parent) continue; - r_align = resource_alignment(r); + r_align = pci_resource_alignment(dev, r); if (!r_align) { dev_warn(&dev->dev, "BAR %d: bogus alignment " "%pR flags %#lx\n", @@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) struct resource_list *ln = list->next; if (ln) - align = resource_alignment(ln->res); + align = pci_resource_alignment(ln->dev, ln->res); if (r_align > align) { tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index eddb0748b0e..8c02b6c53bd 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot); #include <linux/pci_hotplug.h> /** * pci_hp_create_link - create symbolic link to the hotplug driver module. - * @slot: struct pci_slot + * @pci_slot: struct pci_slot * * Helper function for pci_hotplug_core.c to create symbolic link to * the hotplug driver module. @@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link); /** * pci_hp_remove_link - remove symbolic link to the hotplug driver module. - * @slot: struct pci_slot + * @pci_slot: struct pci_slot * * Helper function for pci_hotplug_core.c to remove symbolic link to * the hotplug driver module. 
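pbus_size_mem() now asks pci_resource_alignment() per BAR because, as the "/* For bridges size != alignment */" comment notes, how much space a resource consumes and what boundary it must start on are separate questions - and for IOV BARs the gap can be large. The following is only a simplified model of accumulating a bridge window from (size, alignment) pairs, not the actual allocation logic in setup-bus.c; the BAR values are invented:

#include <stdio.h>

struct res { unsigned long size, align; };

static unsigned long roundup_to(unsigned long x, unsigned long a)
{
        return (x + a - 1) / a * a;
}

int main(void)
{
        /* Hypothetical BARs behind one bridge: a 1 MiB BAR, plus an IOV
         * resource that is 256 KiB in total but only needs 16 KiB alignment
         * (its VF BAR size), as pci_sriov_resource_alignment() would report. */
        struct res bars[] = {
                { 1UL << 20, 1UL << 20 },
                { 256UL << 10, 16UL << 10 },
        };
        unsigned long offset = 0, win_align = 0;

        for (unsigned i = 0; i < sizeof(bars) / sizeof(bars[0]); i++) {
                offset = roundup_to(offset, bars[i].align);
                offset += bars[i].size;
                if (bars[i].align > win_align)
                        win_align = bars[i].align;
        }
        printf("window needs %lu KiB, aligned to %lu KiB\n",
               roundup_to(offset, win_align) / 1024, win_align / 1024);
        return 0;
}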
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index ec22284eed3..e1c1ec54089 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -9,7 +9,6 @@ #include <linux/errno.h> #include <linux/pci.h> -#include <linux/smp_lock.h> #include <linux/syscalls.h> #include <asm/uaccess.h> #include "pci.h" diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 9ad97ea836e..8eb04230fec 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c @@ -472,7 +472,8 @@ static int __init init_tcic(void) init_timer(&poll_timer); /* Build interrupt mask */ - printk(", %d sockets\n" KERN_INFO " irq list (", sockets); + printk(KERN_CONT ", %d sockets\n", sockets); + printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index 659421d0ca4..d4ad50d737b 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -1,7 +1,7 @@ /* * vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services. * - * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,7 +32,7 @@ #include "i82365.h" MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services"); -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_LICENSE("GPL"); #define CARD_MAX_SLOTS 2 diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c index 812f038e9bd..9b3c15827e5 100644 --- a/drivers/pcmcia/vrc4173_cardu.c +++ b/drivers/pcmcia/vrc4173_cardu.c @@ -6,7 +6,7 @@ * NEC VRC4173 CARDU driver for Socket Services * (This device doesn't support CardBus. it is supporting only 16bit PC Card.) * - * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -41,7 +41,7 @@ #include "vrc4173_cardu.h" MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services"); -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_LICENSE("GPL"); static int vrc4173_cardu_slots; diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h index 7d77c74120c..a7d96018ed8 100644 --- a/drivers/pcmcia/vrc4173_cardu.h +++ b/drivers/pcmcia/vrc4173_cardu.h @@ -5,7 +5,7 @@ * BRIEF MODULE DESCRIPTION * Include file for NEC VRC4173 CARDU. * - * Copyright 2002 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7232fe7104a..77c6097ced8 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -277,31 +277,6 @@ config THINKPAD_ACPI_UNSAFE_LEDS Say N here, unless you are building a kernel for your own use, and need to control the important firmware LEDs. 
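The tcic.c hunk above fixes a printk that embedded KERN_INFO in the middle of a format string, where it is printed as ordinary text instead of starting a new log record; the continuation has to be KERN_CONT and the new record its own printk. A userspace imitation that makes the difference visible (the macro definitions are stand-ins for the kernel's level-prefix strings):

#include <stdio.h>

/* Userspace stand-ins: in the kernel these are string prefixes that mark
 * the log level at the *start* of a message. */
#define KERN_INFO "<6>"
#define KERN_CONT ""
#define printk(...) printf(__VA_ARGS__)

int main(void)
{
        int sockets = 2;

        /* Wrong (old tcic.c): the level marker lands mid-string, so it is
         * emitted as data instead of starting a new record. */
        printk(", %d sockets\n" KERN_INFO "  irq list (", sockets);
        printf("\n---\n");

        /* Right: continue the current line, then start a fresh record. */
        printk(KERN_CONT ", %d sockets\n", sockets);
        printk(KERN_INFO "  irq list (");
        printf("\n");
        return 0;
}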
-config THINKPAD_ACPI_DOCK - bool "Legacy Docking Station Support" - depends on THINKPAD_ACPI - depends on ACPI_DOCK=n - default n - ---help--- - Allows the thinkpad_acpi driver to handle docking station events. - This support was made obsolete by the generic ACPI docking station - support (CONFIG_ACPI_DOCK). It will allow locking and removing the - laptop from the docking station, but will not properly connect PCI - devices. - - If you are not sure, say N here. - -config THINKPAD_ACPI_BAY - bool "Legacy Removable Bay Support" - depends on THINKPAD_ACPI - default y - ---help--- - Allows the thinkpad_acpi driver to handle removable bays. It will - electrically disable the device in the bay, and also generate - notifications when the bay lever is ejected or inserted. - - If you are not sure, say Y here. - config THINKPAD_ACPI_VIDEO bool "Video output control support" depends on THINKPAD_ACPI @@ -355,6 +330,7 @@ config EEEPC_LAPTOP depends on INPUT depends on EXPERIMENTAL depends on RFKILL || RFKILL = n + depends on HOTPLUG_PCI select BACKLIGHT_CLASS_DEVICE select HWMON ---help--- diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index be2fd6f9163..fb45f5ee8df 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -973,7 +973,7 @@ static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; - status = set_u32(!!blocked, cap); + status = set_u32(!blocked, cap); if (ACPI_FAILURE(status)) return -ENODEV; return 0; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 4207b26ff99..222ffb892f2 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -16,6 +16,8 @@ * GNU General Public License for more details. 
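The acer-wmi one-liner above is a polarity fix: rfkill's set_block callback receives blocked == true when the radio should be switched off, while the firmware method expects 1 for "radio enabled", so the value written must be !blocked. A minimal sketch of that mapping; firmware_set_radio() is a hypothetical stand-in for the driver's set_u32():

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the firmware call: 1 = radio enabled, 0 = radio disabled. */
static void firmware_set_radio(unsigned value)
{
        printf("firmware radio state <- %u\n", value);
}

/* rfkill set_block semantics: blocked == true means "turn the radio off",
 * so the enable value handed to firmware is the inverse of 'blocked'. */
static int demo_rfkill_set(bool blocked)
{
        firmware_set_radio(!blocked);
        return 0;
}

int main(void)
{
        demo_rfkill_set(true);   /* user blocks the radio   -> writes 0 */
        demo_rfkill_set(false);  /* user unblocks the radio -> writes 1 */
        return 0;
}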
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -31,6 +33,7 @@ #include <linux/input.h> #include <linux/rfkill.h> #include <linux/pci.h> +#include <linux/pci_hotplug.h> #define EEEPC_LAPTOP_VERSION "0.1" @@ -40,11 +43,6 @@ #define EEEPC_HOTK_DEVICE_NAME "Hotkey" #define EEEPC_HOTK_HID "ASUS010" -#define EEEPC_LOG EEEPC_HOTK_FILE ": " -#define EEEPC_ERR KERN_ERR EEEPC_LOG -#define EEEPC_WARNING KERN_WARNING EEEPC_LOG -#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG -#define EEEPC_INFO KERN_INFO EEEPC_LOG /* * Definitions for Asus EeePC @@ -141,8 +139,11 @@ struct eeepc_hotk { u16 event_count[128]; /* count for each event */ struct input_dev *inputdev; u16 *keycode_map; - struct rfkill *eeepc_wlan_rfkill; - struct rfkill *eeepc_bluetooth_rfkill; + struct rfkill *wlan_rfkill; + struct rfkill *bluetooth_rfkill; + struct rfkill *wwan3g_rfkill; + struct hotplug_slot *hotplug_slot; + struct work_struct hotplug_work; }; /* The actual device the driver binds to */ @@ -213,6 +214,15 @@ static struct acpi_driver eeepc_hotk_driver = { }, }; +/* PCI hotplug ops */ +static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value); + +static struct hotplug_slot_ops eeepc_hotplug_slot_ops = { + .owner = THIS_MODULE, + .get_adapter_status = eeepc_get_adapter_status, + .get_power_status = eeepc_get_adapter_status, +}; + /* The backlight device /sys/class/backlight */ static struct backlight_device *eeepc_backlight_device; @@ -274,20 +284,20 @@ static int set_acpi(int cm, int value) if (method == NULL) return -ENODEV; if (write_acpi_int(ehotk->handle, method, value, NULL)) - printk(EEEPC_WARNING "Error writing %s\n", method); + pr_warning("Error writing %s\n", method); } return 0; } static int get_acpi(int cm) { - int value = -1; + int value = -ENODEV; if ((ehotk->cm_supported & (0x1 << cm))) { const char *method = cm_getv[cm]; if (method == NULL) return -ENODEV; if (read_acpi_int(ehotk->handle, method, &value)) - printk(EEEPC_WARNING "Error reading %s\n", method); + pr_warning("Error reading %s\n", method); } return value; } @@ -359,13 +369,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count) rv = parse_arg(buf, count, &value); if (rv > 0) - set_acpi(cm, value); + value = set_acpi(cm, value); + if (value < 0) + return value; return rv; } static ssize_t show_sys_acpi(int cm, char *buf) { - return sprintf(buf, "%d\n", get_acpi(cm)); + int value = get_acpi(cm); + + if (value < 0) + return value; + return sprintf(buf, "%d\n", value); } #define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \ @@ -539,6 +555,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode) return -EINVAL; } +static void cmsg_quirk(int cm, const char *name) +{ + int dummy; + + /* Some BIOSes do not report cm although it is avaliable. + Check if cm_getv[cm] works and, if yes, assume cm should be set. 
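The cmsg_quirk() comment just above describes a "trust the probe, not the advertisement" pattern: some BIOSes leave a feature bit out of CMSG even though the matching getter method works, so the driver calls the getter and, if it answers, sets the bit anyway. A generic sketch of that pattern with invented feature numbers and a fake probe standing in for read_acpi_int():

#include <stdio.h>
#include <stdbool.h>

static unsigned advertised = 0x5;            /* firmware says: features 0 and 2 */

/* Hypothetical probe: does the per-feature getter actually respond? */
static bool getter_works(int feature)
{
        return feature == 1;                 /* ...feature 1 responds anyway */
}

static void apply_quirk(unsigned *supported, int feature, const char *name)
{
        if (!(*supported & (1u << feature)) && getter_works(feature)) {
                printf("%s (0x%x) not reported by BIOS, enabling anyway\n",
                       name, 1u << feature);
                *supported |= 1u << feature;
        }
}

int main(void)
{
        unsigned supported = advertised;

        apply_quirk(&supported, 1, "LID");
        printf("effective mask: 0x%x\n", supported);
        return 0;
}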
*/ + if (!(ehotk->cm_supported & (1 << cm)) + && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) { + pr_info("%s (%x) not reported by BIOS," + " enabling anyway\n", name, 1 << cm); + ehotk->cm_supported |= 1 << cm; + } +} + +static void cmsg_quirks(void) +{ + cmsg_quirk(CM_ASL_LID, "LID"); + cmsg_quirk(CM_ASL_TYPE, "TYPE"); + cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER"); + cmsg_quirk(CM_ASL_TPD, "TPD"); +} + static int eeepc_hotk_check(void) { const struct key_entry *key; @@ -551,26 +589,24 @@ static int eeepc_hotk_check(void) if (ehotk->device->status.present) { if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag, &buffer)) { - printk(EEEPC_ERR "Hotkey initialization failed\n"); + pr_err("Hotkey initialization failed\n"); return -ENODEV; } else { - printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n", - ehotk->init_flag); + pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag); } /* get control methods supported */ if (read_acpi_int(ehotk->handle, "CMSG" , &ehotk->cm_supported)) { - printk(EEEPC_ERR - "Get control methods supported failed\n"); + pr_err("Get control methods supported failed\n"); return -ENODEV; } else { - printk(EEEPC_INFO - "Get control methods supported: 0x%x\n", - ehotk->cm_supported); + cmsg_quirks(); + pr_info("Get control methods supported: 0x%x\n", + ehotk->cm_supported); } ehotk->inputdev = input_allocate_device(); if (!ehotk->inputdev) { - printk(EEEPC_INFO "Unable to allocate input device\n"); + pr_info("Unable to allocate input device\n"); return 0; } ehotk->inputdev->name = "Asus EeePC extra buttons"; @@ -589,12 +625,12 @@ static int eeepc_hotk_check(void) } result = input_register_device(ehotk->inputdev); if (result) { - printk(EEEPC_INFO "Unable to register input device\n"); + pr_info("Unable to register input device\n"); input_free_device(ehotk->inputdev); return 0; } } else { - printk(EEEPC_ERR "Hotkey device not present, aborting\n"); + pr_err("Hotkey device not present, aborting\n"); return -EINVAL; } return 0; @@ -612,14 +648,27 @@ static int notify_brn(void) return -1; } -static void eeepc_rfkill_hotplug(void) +static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot, + u8 *value) +{ + int val = get_acpi(CM_ASL_WLAN); + + if (val == 1 || val == 0) + *value = val; + else + return -EINVAL; + + return 0; +} + +static void eeepc_hotplug_work(struct work_struct *work) { struct pci_dev *dev; struct pci_bus *bus = pci_find_bus(0, 1); bool blocked; if (!bus) { - printk(EEEPC_WARNING "Unable to find PCI bus 1?\n"); + pr_warning("Unable to find PCI bus 1?\n"); return; } @@ -635,7 +684,7 @@ static void eeepc_rfkill_hotplug(void) if (dev) { pci_bus_assign_resources(bus); if (pci_bus_add_device(dev)) - printk(EEEPC_ERR "Unable to hotplug wifi\n"); + pr_err("Unable to hotplug wifi\n"); } } else { dev = pci_get_slot(bus, 0); @@ -645,7 +694,7 @@ static void eeepc_rfkill_hotplug(void) } } - rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked); + rfkill_set_sw_state(ehotk->wlan_rfkill, blocked); } static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) @@ -653,7 +702,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) if (event != ACPI_NOTIFY_BUS_CHECK) return; - eeepc_rfkill_hotplug(); + schedule_work(&ehotk->hotplug_work); } static void eeepc_hotk_notify(struct acpi_device *device, u32 event) @@ -718,8 +767,7 @@ static int eeepc_register_rfkill_notifier(char *node) eeepc_rfkill_notify, NULL); if (ACPI_FAILURE(status)) - printk(EEEPC_WARNING - "Failed to register notify on %s\n", node); + 
pr_warning("Failed to register notify on %s\n", node); } else return -ENODEV; @@ -738,19 +786,66 @@ static void eeepc_unregister_rfkill_notifier(char *node) ACPI_SYSTEM_NOTIFY, eeepc_rfkill_notify); if (ACPI_FAILURE(status)) - printk(EEEPC_ERR - "Error removing rfkill notify handler %s\n", + pr_err("Error removing rfkill notify handler %s\n", node); } } +static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot) +{ + kfree(hotplug_slot->info); + kfree(hotplug_slot); +} + +static int eeepc_setup_pci_hotplug(void) +{ + int ret = -ENOMEM; + struct pci_bus *bus = pci_find_bus(0, 1); + + if (!bus) { + pr_err("Unable to find wifi PCI bus\n"); + return -ENODEV; + } + + ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); + if (!ehotk->hotplug_slot) + goto error_slot; + + ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), + GFP_KERNEL); + if (!ehotk->hotplug_slot->info) + goto error_info; + + ehotk->hotplug_slot->private = ehotk; + ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug; + ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops; + eeepc_get_adapter_status(ehotk->hotplug_slot, + &ehotk->hotplug_slot->info->adapter_status); + + ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi"); + if (ret) { + pr_err("Unable to register hotplug slot - %d\n", ret); + goto error_register; + } + + return 0; + +error_register: + kfree(ehotk->hotplug_slot->info); +error_info: + kfree(ehotk->hotplug_slot); + ehotk->hotplug_slot = NULL; +error_slot: + return ret; +} + static int eeepc_hotk_add(struct acpi_device *device) { int result; if (!device) return -EINVAL; - printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n"); + pr_notice(EEEPC_HOTK_NAME "\n"); ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL); if (!ehotk) return -ENOMEM; @@ -764,53 +859,8 @@ static int eeepc_hotk_add(struct acpi_device *device) if (result) goto ehotk_fail; - eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); - eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); - - if (get_acpi(CM_ASL_WLAN) != -1) { - ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan", - &device->dev, - RFKILL_TYPE_WLAN, - &eeepc_rfkill_ops, - (void *)CM_ASL_WLAN); - - if (!ehotk->eeepc_wlan_rfkill) - goto wlan_fail; - - rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill, - get_acpi(CM_ASL_WLAN) != 1); - result = rfkill_register(ehotk->eeepc_wlan_rfkill); - if (result) - goto wlan_fail; - } - - if (get_acpi(CM_ASL_BLUETOOTH) != -1) { - ehotk->eeepc_bluetooth_rfkill = - rfkill_alloc("eeepc-bluetooth", - &device->dev, - RFKILL_TYPE_BLUETOOTH, - &eeepc_rfkill_ops, - (void *)CM_ASL_BLUETOOTH); - - if (!ehotk->eeepc_bluetooth_rfkill) - goto bluetooth_fail; - - rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill, - get_acpi(CM_ASL_BLUETOOTH) != 1); - result = rfkill_register(ehotk->eeepc_bluetooth_rfkill); - if (result) - goto bluetooth_fail; - } - return 0; - bluetooth_fail: - rfkill_destroy(ehotk->eeepc_bluetooth_rfkill); - rfkill_unregister(ehotk->eeepc_wlan_rfkill); - wlan_fail: - rfkill_destroy(ehotk->eeepc_wlan_rfkill); - eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); - eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); ehotk_fail: kfree(ehotk); ehotk = NULL; @@ -823,16 +873,13 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type) if (!device || !acpi_driver_data(device)) return -EINVAL; - eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); - eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); - kfree(ehotk); return 0; } static int eeepc_hotk_resume(struct acpi_device *device) { - if 
(ehotk->eeepc_wlan_rfkill) { + if (ehotk->wlan_rfkill) { bool wlan; /* Workaround - it seems that _PTS disables the wireless @@ -844,14 +891,13 @@ static int eeepc_hotk_resume(struct acpi_device *device) wlan = get_acpi(CM_ASL_WLAN); set_acpi(CM_ASL_WLAN, wlan); - rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, - wlan != 1); + rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1); - eeepc_rfkill_hotplug(); + schedule_work(&ehotk->hotplug_work); } - if (ehotk->eeepc_bluetooth_rfkill) - rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill, + if (ehotk->bluetooth_rfkill) + rfkill_set_sw_state(ehotk->bluetooth_rfkill, get_acpi(CM_ASL_BLUETOOTH) != 1); return 0; @@ -973,10 +1019,16 @@ static void eeepc_backlight_exit(void) static void eeepc_rfkill_exit(void) { - if (ehotk->eeepc_wlan_rfkill) - rfkill_unregister(ehotk->eeepc_wlan_rfkill); - if (ehotk->eeepc_bluetooth_rfkill) - rfkill_unregister(ehotk->eeepc_bluetooth_rfkill); + eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); + eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); + if (ehotk->wlan_rfkill) + rfkill_unregister(ehotk->wlan_rfkill); + if (ehotk->bluetooth_rfkill) + rfkill_unregister(ehotk->bluetooth_rfkill); + if (ehotk->wwan3g_rfkill) + rfkill_unregister(ehotk->wwan3g_rfkill); + if (ehotk->hotplug_slot) + pci_hp_deregister(ehotk->hotplug_slot); } static void eeepc_input_exit(void) @@ -1011,6 +1063,77 @@ static void __exit eeepc_laptop_exit(void) platform_driver_unregister(&platform_driver); } +static int eeepc_new_rfkill(struct rfkill **rfkill, + const char *name, struct device *dev, + enum rfkill_type type, int cm) +{ + int result; + + result = get_acpi(cm); + if (result < 0) + return result; + + *rfkill = rfkill_alloc(name, dev, type, + &eeepc_rfkill_ops, (void *)(unsigned long)cm); + + if (!*rfkill) + return -EINVAL; + + rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1); + result = rfkill_register(*rfkill); + if (result) { + rfkill_destroy(*rfkill); + *rfkill = NULL; + return result; + } + return 0; +} + + +static int eeepc_rfkill_init(struct device *dev) +{ + int result = 0; + + INIT_WORK(&ehotk->hotplug_work, eeepc_hotplug_work); + + eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); + eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); + + result = eeepc_new_rfkill(&ehotk->wlan_rfkill, + "eeepc-wlan", dev, + RFKILL_TYPE_WLAN, CM_ASL_WLAN); + + if (result && result != -ENODEV) + goto exit; + + result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill, + "eeepc-bluetooth", dev, + RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH); + + if (result && result != -ENODEV) + goto exit; + + result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill, + "eeepc-wwan3g", dev, + RFKILL_TYPE_WWAN, CM_ASL_3G); + + if (result && result != -ENODEV) + goto exit; + + result = eeepc_setup_pci_hotplug(); + /* + * If we get -EBUSY then something else is handling the PCI hotplug - + * don't fail in this case + */ + if (result == -EBUSY) + result = 0; + +exit: + if (result && result != -ENODEV) + eeepc_rfkill_exit(); + return result; +} + static int eeepc_backlight_init(struct device *dev) { struct backlight_device *bd; @@ -1018,8 +1141,7 @@ static int eeepc_backlight_init(struct device *dev) bd = backlight_device_register(EEEPC_HOTK_FILE, dev, NULL, &eeepcbl_ops); if (IS_ERR(bd)) { - printk(EEEPC_ERR - "Could not register eeepc backlight device\n"); + pr_err("Could not register eeepc backlight device\n"); eeepc_backlight_device = NULL; return PTR_ERR(bd); } @@ -1038,8 +1160,7 @@ static int eeepc_hwmon_init(struct device *dev) hwmon = hwmon_device_register(dev); if (IS_ERR(hwmon)) { 
- printk(EEEPC_ERR - "Could not register eeepc hwmon device\n"); + pr_err("Could not register eeepc hwmon device\n"); eeepc_hwmon_device = NULL; return PTR_ERR(hwmon); } @@ -1065,19 +1186,6 @@ static int __init eeepc_laptop_init(void) acpi_bus_unregister_driver(&eeepc_hotk_driver); return -ENODEV; } - dev = acpi_get_physical_device(ehotk->device->handle); - - if (!acpi_video_backlight_support()) { - result = eeepc_backlight_init(dev); - if (result) - goto fail_backlight; - } else - printk(EEEPC_INFO "Backlight controlled by ACPI video " - "driver\n"); - - result = eeepc_hwmon_init(dev); - if (result) - goto fail_hwmon; eeepc_enable_camera(); @@ -1097,7 +1205,33 @@ static int __init eeepc_laptop_init(void) &platform_attribute_group); if (result) goto fail_sysfs; + + dev = &platform_device->dev; + + if (!acpi_video_backlight_support()) { + result = eeepc_backlight_init(dev); + if (result) + goto fail_backlight; + } else + pr_info("Backlight controlled by ACPI video " + "driver\n"); + + result = eeepc_hwmon_init(dev); + if (result) + goto fail_hwmon; + + result = eeepc_rfkill_init(dev); + if (result) + goto fail_rfkill; + return 0; +fail_rfkill: + eeepc_hwmon_exit(); +fail_hwmon: + eeepc_backlight_exit(); +fail_backlight: + sysfs_remove_group(&platform_device->dev.kobj, + &platform_attribute_group); fail_sysfs: platform_device_del(platform_device); fail_platform_device2: @@ -1105,12 +1239,7 @@ fail_platform_device2: fail_platform_device1: platform_driver_unregister(&platform_driver); fail_platform_driver: - eeepc_hwmon_exit(); -fail_hwmon: - eeepc_backlight_exit(); -fail_backlight: eeepc_input_exit(); - eeepc_rfkill_exit(); return result; } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 4ac2311c00a..a2ad53e1587 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -171,7 +171,7 @@ static int hp_wmi_tablet_state(void) static int hp_wmi_set_block(void *data, bool blocked) { unsigned long b = (unsigned long) data; - int query = BIT(b + 8) | ((!!blocked) << b); + int query = BIT(b + 8) | ((!blocked) << b); return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query); } @@ -520,11 +520,13 @@ static int hp_wmi_resume_handler(struct platform_device *device) * the input layer will only actually pass it on if the state * changed. 
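In the hp-wmi hunk above, the wireless query word packs two things: the bit in the high byte selects which radio the write applies to, and the bit in the low byte carries the new "enabled" state - hence !blocked, the same polarity fix as in acer-wmi. A worked check of the encoding (radio indices 0 and 1 are just examples):

#include <stdio.h>
#include <stdbool.h>

#define BIT(n) (1u << (n))

/* Mirrors the query word built in hp_wmi_set_block(). */
static unsigned wireless_query(unsigned b, bool blocked)
{
        return BIT(b + 8) | ((unsigned)!blocked << b);
}

int main(void)
{
        printf("radio 0, unblock -> 0x%03x\n", wireless_query(0, false)); /* 0x101 */
        printf("radio 0, block   -> 0x%03x\n", wireless_query(0, true));  /* 0x100 */
        printf("radio 1, unblock -> 0x%03x\n", wireless_query(1, false)); /* 0x202 */
        return 0;
}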
*/ - - input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); - input_sync(hp_wmi_input_dev); + if (hp_wmi_input_dev) { + input_report_switch(hp_wmi_input_dev, SW_DOCK, + hp_wmi_dock_state()); + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, + hp_wmi_tablet_state()); + input_sync(hp_wmi_input_dev); + } return 0; } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a463fd72c49..e8560085250 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -239,12 +239,6 @@ struct ibm_init_struct { }; static struct { -#ifdef CONFIG_THINKPAD_ACPI_BAY - u32 bay_status:1; - u32 bay_eject:1; - u32 bay_status2:1; - u32 bay_eject2:1; -#endif u32 bluetooth:1; u32 hotkey:1; u32 hotkey_mask:1; @@ -589,18 +583,6 @@ static int acpi_ec_write(int i, u8 v) return 1; } -#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY) -static int _sta(acpi_handle handle) -{ - int status; - - if (!handle || !acpi_evalf(handle, &status, "_STA", "d")) - status = 0; - - return status; -} -#endif - static int issue_thinkpad_cmos_command(int cmos_cmd) { if (!cmos_handle) @@ -784,6 +766,8 @@ static int dispatch_procfs_write(struct file *file, if (!ibm || !ibm->write) return -EINVAL; + if (count > PAGE_SIZE - 2) + return -EINVAL; kernbuf = kmalloc(count + 2, GFP_KERNEL); if (!kernbuf) @@ -4442,293 +4426,6 @@ static struct ibm_struct light_driver_data = { }; /************************************************************************* - * Dock subdriver - */ - -#ifdef CONFIG_THINKPAD_ACPI_DOCK - -static void dock_notify(struct ibm_struct *ibm, u32 event); -static int dock_read(char *p); -static int dock_write(char *buf); - -TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */ - "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */ - "\\_SB.PCI0.PCI1.DOCK", /* all others */ - "\\_SB.PCI.ISA.SLCE", /* 570 */ - ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ - -/* don't list other alternatives as we install a notify handler on the 570 */ -TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */ - -static const struct acpi_device_id ibm_pci_device_ids[] = { - {PCI_ROOT_HID_STRING, 0}, - {"", 0}, -}; - -static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = { - { - .notify = dock_notify, - .handle = &dock_handle, - .type = ACPI_SYSTEM_NOTIFY, - }, - { - /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING. - * We just use it to get notifications of dock hotplug - * in very old thinkpads */ - .hid = ibm_pci_device_ids, - .notify = dock_notify, - .handle = &pci_handle, - .type = ACPI_SYSTEM_NOTIFY, - }, -}; - -static struct ibm_struct dock_driver_data[2] = { - { - .name = "dock", - .read = dock_read, - .write = dock_write, - .acpi = &ibm_dock_acpidriver[0], - }, - { - .name = "dock", - .acpi = &ibm_dock_acpidriver[1], - }, -}; - -#define dock_docked() (_sta(dock_handle) & 1) - -static int __init dock_init(struct ibm_init_struct *iibm) -{ - vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n"); - - TPACPI_ACPIHANDLE_INIT(dock); - - vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n", - str_supported(dock_handle != NULL)); - - return (dock_handle)? 
0 : 1; -} - -static int __init dock_init2(struct ibm_init_struct *iibm) -{ - int dock2_needed; - - vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n"); - - if (dock_driver_data[0].flags.acpi_driver_registered && - dock_driver_data[0].flags.acpi_notify_installed) { - TPACPI_ACPIHANDLE_INIT(pci); - dock2_needed = (pci_handle != NULL); - vdbg_printk(TPACPI_DBG_INIT, - "dock PCI handler for the TP 570 is %s\n", - str_supported(dock2_needed)); - } else { - vdbg_printk(TPACPI_DBG_INIT, - "dock subdriver part 2 not required\n"); - dock2_needed = 0; - } - - return (dock2_needed)? 0 : 1; -} - -static void dock_notify(struct ibm_struct *ibm, u32 event) -{ - int docked = dock_docked(); - int pci = ibm->acpi->hid && ibm->acpi->device && - acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids); - int data; - - if (event == 1 && !pci) /* 570 */ - data = 1; /* button */ - else if (event == 1 && pci) /* 570 */ - data = 3; /* dock */ - else if (event == 3 && docked) - data = 1; /* button */ - else if (event == 3 && !docked) - data = 2; /* undock */ - else if (event == 0 && docked) - data = 3; /* dock */ - else { - printk(TPACPI_ERR "unknown dock event %d, status %d\n", - event, _sta(dock_handle)); - data = 0; /* unknown */ - } - acpi_bus_generate_proc_event(ibm->acpi->device, event, data); - acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, - dev_name(&ibm->acpi->device->dev), - event, data); -} - -static int dock_read(char *p) -{ - int len = 0; - int docked = dock_docked(); - - if (!dock_handle) - len += sprintf(p + len, "status:\t\tnot supported\n"); - else if (!docked) - len += sprintf(p + len, "status:\t\tundocked\n"); - else { - len += sprintf(p + len, "status:\t\tdocked\n"); - len += sprintf(p + len, "commands:\tdock, undock\n"); - } - - return len; -} - -static int dock_write(char *buf) -{ - char *cmd; - - if (!dock_docked()) - return -ENODEV; - - while ((cmd = next_cmd(&buf))) { - if (strlencmp(cmd, "undock") == 0) { - if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) || - !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1)) - return -EIO; - } else if (strlencmp(cmd, "dock") == 0) { - if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1)) - return -EIO; - } else - return -EINVAL; - } - - return 0; -} - -#endif /* CONFIG_THINKPAD_ACPI_DOCK */ - -/************************************************************************* - * Bay subdriver - */ - -#ifdef CONFIG_THINKPAD_ACPI_BAY - -TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ - "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ - "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */ - "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */ - ); /* A21e, R30, R31 */ -TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */ - "_EJ0", /* all others */ - ); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */ -TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */ - "\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */ - ); /* all others */ -TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */ - "_EJ0", /* 770x */ - ); /* all others */ - -static int __init bay_init(struct ibm_init_struct *iibm) -{ - vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n"); - - TPACPI_ACPIHANDLE_INIT(bay); - if (bay_handle) - TPACPI_ACPIHANDLE_INIT(bay_ej); - TPACPI_ACPIHANDLE_INIT(bay2); - if (bay2_handle) - TPACPI_ACPIHANDLE_INIT(bay2_ej); - - tp_features.bay_status = bay_handle && - acpi_evalf(bay_handle, NULL, "_STA", "qv"); - tp_features.bay_status2 = bay2_handle && - acpi_evalf(bay2_handle, NULL, "_STA", 
"qv"); - - tp_features.bay_eject = bay_handle && bay_ej_handle && - (strlencmp(bay_ej_path, "_EJ0") == 0 || experimental); - tp_features.bay_eject2 = bay2_handle && bay2_ej_handle && - (strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental); - - vdbg_printk(TPACPI_DBG_INIT, - "bay 1: status %s, eject %s; bay 2: status %s, eject %s\n", - str_supported(tp_features.bay_status), - str_supported(tp_features.bay_eject), - str_supported(tp_features.bay_status2), - str_supported(tp_features.bay_eject2)); - - return (tp_features.bay_status || tp_features.bay_eject || - tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1; -} - -static void bay_notify(struct ibm_struct *ibm, u32 event) -{ - acpi_bus_generate_proc_event(ibm->acpi->device, event, 0); - acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, - dev_name(&ibm->acpi->device->dev), - event, 0); -} - -#define bay_occupied(b) (_sta(b##_handle) & 1) - -static int bay_read(char *p) -{ - int len = 0; - int occupied = bay_occupied(bay); - int occupied2 = bay_occupied(bay2); - int eject, eject2; - - len += sprintf(p + len, "status:\t\t%s\n", - tp_features.bay_status ? - (occupied ? "occupied" : "unoccupied") : - "not supported"); - if (tp_features.bay_status2) - len += sprintf(p + len, "status2:\t%s\n", occupied2 ? - "occupied" : "unoccupied"); - - eject = tp_features.bay_eject && occupied; - eject2 = tp_features.bay_eject2 && occupied2; - - if (eject && eject2) - len += sprintf(p + len, "commands:\teject, eject2\n"); - else if (eject) - len += sprintf(p + len, "commands:\teject\n"); - else if (eject2) - len += sprintf(p + len, "commands:\teject2\n"); - - return len; -} - -static int bay_write(char *buf) -{ - char *cmd; - - if (!tp_features.bay_eject && !tp_features.bay_eject2) - return -ENODEV; - - while ((cmd = next_cmd(&buf))) { - if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) { - if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1)) - return -EIO; - } else if (tp_features.bay_eject2 && - strlencmp(cmd, "eject2") == 0) { - if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1)) - return -EIO; - } else - return -EINVAL; - } - - return 0; -} - -static struct tp_acpi_drv_struct ibm_bay_acpidriver = { - .notify = bay_notify, - .handle = &bay_handle, - .type = ACPI_SYSTEM_NOTIFY, -}; - -static struct ibm_struct bay_driver_data = { - .name = "bay", - .read = bay_read, - .write = bay_write, - .acpi = &ibm_bay_acpidriver, -}; - -#endif /* CONFIG_THINKPAD_ACPI_BAY */ - -/************************************************************************* * CMOS subdriver */ @@ -5945,14 +5642,48 @@ static struct backlight_ops ibm_backlight_data = { /* --------------------------------------------------------------------- */ +/* + * These are only useful for models that have only one possibility + * of GPU. If the BIOS model handles both ATI and Intel, don't use + * these quirks. 
+ */ +#define TPACPI_BRGHT_Q_NOEC 0x0001 /* Must NOT use EC HBRV */ +#define TPACPI_BRGHT_Q_EC 0x0002 /* Should or must use EC HBRV */ +#define TPACPI_BRGHT_Q_ASK 0x8000 /* Ask for user report */ + +static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { + /* Models with ATI GPUs known to require ECNVRAM mode */ + TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */ + + /* Models with ATI GPUs (waiting confirmation) */ + TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), + TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), + TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), + TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), + + /* Models with Intel Extreme Graphics 2 (waiting confirmation) */ + TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), + TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), + TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), + + /* Models with Intel GMA900 */ + TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */ + TPACPI_Q_IBM('7', '4', TPACPI_BRGHT_Q_NOEC), /* X41 */ + TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */ +}; + static int __init brightness_init(struct ibm_init_struct *iibm) { int b; + unsigned long quirks; vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n"); mutex_init(&brightness_mutex); + quirks = tpacpi_check_quirks(brightness_quirk_table, + ARRAY_SIZE(brightness_quirk_table)); + /* * We always attempt to detect acpi support, so as to switch * Lenovo Vista BIOS to ACPI brightness mode even if we are not @@ -6009,23 +5740,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm) /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */ if (brightness_mode == TPACPI_BRGHT_MODE_AUTO || brightness_mode == TPACPI_BRGHT_MODE_MAX) { - if (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) { - /* - * IBM models that define HBRV probably have - * EC-based backlight level control - */ - if (acpi_evalf(ec_handle, NULL, "HBRV", "qd")) - /* T40-T43, R50-R52, R50e, R51e, X31-X41 */ - brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM; - else - /* all other IBM ThinkPads */ - brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; - } else - /* All Lenovo ThinkPads */ + if (quirks & TPACPI_BRGHT_Q_EC) + brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM; + else brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; dbg_printk(TPACPI_DBG_BRGHT, - "selected brightness_mode=%d\n", + "driver auto-selected brightness_mode=%d\n", brightness_mode); } @@ -6052,6 +5773,15 @@ static int __init brightness_init(struct ibm_init_struct *iibm) vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, "brightness is supported\n"); + if (quirks & TPACPI_BRGHT_Q_ASK) { + printk(TPACPI_NOTICE + "brightness: will use unverified default: " + "brightness_mode=%d\n", brightness_mode); + printk(TPACPI_NOTICE + "brightness: please report to %s whether it works well " + "or not on your ThinkPad\n", TPACPI_MAIL); + } + ibm_backlight_device->props.max_brightness = (tp_features.bright_16levels)? 
15 : 7; ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; @@ -7854,22 +7584,6 @@ static struct ibm_init_struct ibms_init[] __initdata = { .init = light_init, .data = &light_driver_data, }, -#ifdef CONFIG_THINKPAD_ACPI_DOCK - { - .init = dock_init, - .data = &dock_driver_data[0], - }, - { - .init = dock_init2, - .data = &dock_driver_data[1], - }, -#endif -#ifdef CONFIG_THINKPAD_ACPI_BAY - { - .init = bay_init, - .data = &bay_driver_data, - }, -#endif { .init = cmos_init, .data = &cmos_driver_data, @@ -7968,12 +7682,6 @@ TPACPI_PARAM(hotkey); TPACPI_PARAM(bluetooth); TPACPI_PARAM(video); TPACPI_PARAM(light); -#ifdef CONFIG_THINKPAD_ACPI_DOCK -TPACPI_PARAM(dock); -#endif -#ifdef CONFIG_THINKPAD_ACPI_BAY -TPACPI_PARAM(bay); -#endif /* CONFIG_THINKPAD_ACPI_BAY */ TPACPI_PARAM(cmos); TPACPI_PARAM(led); TPACPI_PARAM(beep); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 81d31ea507d..51c0a8bee41 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -335,6 +335,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data) if (hci_result != HCI_SUCCESS) { /* Can't do anything useful */ mutex_unlock(&dev->mutex); + return; } new_rfk_state = value; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 043b208d971..f215a591919 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) acpi_status status; struct acpi_object_list input; union acpi_object params[3]; - char method[4] = "WM"; + char method[5] = "WM"; if (!find_guid(guid_string, &wblock)) return AE_ERROR; @@ -328,8 +328,8 @@ struct acpi_buffer *out) acpi_status status, wc_status = AE_ERROR; struct acpi_object_list input, wc_input; union acpi_object wc_params[1], wq_params[1]; - char method[4]; - char wc_method[4] = "WC"; + char method[5]; + char wc_method[5] = "WC"; if (!guid_string || !out) return AE_BAD_PARAMETER; @@ -410,7 +410,7 @@ const struct acpi_buffer *in) acpi_handle handle; struct acpi_object_list input; union acpi_object params[2]; - char method[4] = "WS"; + char method[5] = "WS"; if (!guid_string || !in) return AE_BAD_DATA; diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 7eda34838bf..bdbc4f73fcd 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -43,6 +43,13 @@ config BATTERY_DS2760 help Say Y here to enable support for batteries with ds2760 chip. +config BATTERY_DS2782 + tristate "DS2782 standalone gas-gauge" + depends on I2C + help + Say Y here to enable support for the DS2782 standalone battery + gas-gauge. 
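The wmi.c change from char method[4] to char method[5] is an off-by-one fix: the method name is a two-character prefix ("WM", "WC" or "WS") plus a two-character object ID, four characters whose terminating NUL needs a fifth byte. The strncat-style construction and the "AB" object ID below are assumptions for illustration; only the buffer sizes come from the hunk:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* "WM" plus a two-character object id gives a four-character name;
         * the terminating NUL makes five bytes. */
        char ok[5] = "WM";

        strncat(ok, "AB", 2);          /* fits: 'W' 'M' 'A' 'B' '\0' */
        printf("%s needs %zu bytes\n", ok, strlen(ok) + 1);   /* prints 5 */

        /* With char method[4], the same append would write the NUL one byte
         * past the end of the array - the overflow this hunk closes. */
        return 0;
}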
+ config BATTERY_PMU tristate "Apple PMU battery" depends on PPC32 && ADB_PMU diff --git a/drivers/power/Makefile b/drivers/power/Makefile index daf3179689a..380d17c9ae2 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_APM_POWER) += apm_power.o obj-$(CONFIG_WM8350_POWER) += wm8350_power.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o +obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c new file mode 100644 index 00000000000..da14f374cb6 --- /dev/null +++ b/drivers/power/ds2782_battery.c @@ -0,0 +1,330 @@ +/* + * I2C client/driver for the Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC + * + * Copyright (C) 2009 Bluewater Systems Ltd + * + * Author: Ryan Mallon <ryan@bluewatersys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/swab.h> +#include <linux/i2c.h> +#include <linux/idr.h> +#include <linux/power_supply.h> + +#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */ + +#define DS2782_REG_VOLT_MSB 0x0c +#define DS2782_REG_TEMP_MSB 0x0a +#define DS2782_REG_CURRENT_MSB 0x0e + +/* EEPROM Block */ +#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */ + +/* Current unit measurement in uA for a 1 milli-ohm sense resistor */ +#define DS2782_CURRENT_UNITS 1563 + +#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery) + +struct ds2782_info { + struct i2c_client *client; + struct power_supply battery; + int id; +}; + +static DEFINE_IDR(battery_id); +static DEFINE_MUTEX(battery_lock); + +static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(info->client, reg); + if (ret < 0) { + dev_err(&info->client->dev, "register read failed\n"); + return ret; + } + + *val = ret; + return 0; +} + +static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb, + s16 *val) +{ + int ret; + + ret = swab16(i2c_smbus_read_word_data(info->client, reg_msb)); + if (ret < 0) { + dev_err(&info->client->dev, "register read failed\n"); + return ret; + } + + *val = ret; + return 0; +} + +static int ds2782_get_temp(struct ds2782_info *info, int *temp) +{ + s16 raw; + int err; + + /* + * Temperature is measured in units of 0.125 degrees celcius, the + * power_supply class measures temperature in tenths of degrees + * celsius. The temperature value is stored as a 10 bit number, plus + * sign in the upper bits of a 16 bit register. + */ + err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw); + if (err) + return err; + *temp = ((raw / 32) * 125) / 100; + return 0; +} + +static int ds2782_get_current(struct ds2782_info *info, int *current_uA) +{ + int sense_res; + int err; + u8 sense_res_raw; + s16 raw; + + /* + * The units of measurement for current are dependent on the value of + * the sense resistor. 
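The conversion in ds2782_get_temp() above packs three steps into one expression: drop the five unused low bits of the register (divide by 32), scale the 0.125 degC units, and express the result in the tenths of a degree that the power_supply class expects. A worked example with an arbitrary raw register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int16_t raw = 0x1900;                 /* arbitrary example register value */
        int units = raw / 32;                 /* drop the 5 unused low bits: 200  */
        int tenths = (units * 125) / 100;     /* 0.125 degC units -> tenths: 250  */

        printf("raw 0x%04x -> %d.%d degC\n",
               (unsigned)(uint16_t)raw, tenths / 10, tenths % 10);
        return 0;
}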
+ */ + err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw); + if (err) + return err; + if (sense_res_raw == 0) { + dev_err(&info->client->dev, "sense resistor value is 0\n"); + return -ENXIO; + } + sense_res = 1000 / sense_res_raw; + + dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n", + sense_res); + err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw); + if (err) + return err; + *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res); + return 0; +} + +static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA) +{ + s16 raw; + int err; + + /* + * Voltage is measured in units of 4.88mV. The voltage is stored as + * a 10-bit number plus sign, in the upper bits of a 16-bit register + */ + err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw); + if (err) + return err; + *voltage_uA = (raw / 32) * 4800; + return 0; +} + +static int ds2782_get_capacity(struct ds2782_info *info, int *capacity) +{ + int err; + u8 raw; + + err = ds2782_read_reg(info, DS2782_REG_RARC, &raw); + if (err) + return err; + *capacity = raw; + return raw; +} + +static int ds2782_get_status(struct ds2782_info *info, int *status) +{ + int err; + int current_uA; + int capacity; + + err = ds2782_get_current(info, ¤t_uA); + if (err) + return err; + + err = ds2782_get_capacity(info, &capacity); + if (err) + return err; + + if (capacity == 100) + *status = POWER_SUPPLY_STATUS_FULL; + else if (current_uA == 0) + *status = POWER_SUPPLY_STATUS_NOT_CHARGING; + else if (current_uA < 0) + *status = POWER_SUPPLY_STATUS_DISCHARGING; + else + *status = POWER_SUPPLY_STATUS_CHARGING; + + return 0; +} + +static int ds2782_battery_get_property(struct power_supply *psy, + enum power_supply_property prop, + union power_supply_propval *val) +{ + struct ds2782_info *info = to_ds2782_info(psy); + int ret; + + switch (prop) { + case POWER_SUPPLY_PROP_STATUS: + ret = ds2782_get_status(info, &val->intval); + break; + + case POWER_SUPPLY_PROP_CAPACITY: + ret = ds2782_get_capacity(info, &val->intval); + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = ds2782_get_voltage(info, &val->intval); + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = ds2782_get_current(info, &val->intval); + break; + + case POWER_SUPPLY_PROP_TEMP: + ret = ds2782_get_temp(info, &val->intval); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static enum power_supply_property ds2782_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_TEMP, +}; + +static void ds2782_power_supply_init(struct power_supply *battery) +{ + battery->type = POWER_SUPPLY_TYPE_BATTERY; + battery->properties = ds2782_battery_props; + battery->num_properties = ARRAY_SIZE(ds2782_battery_props); + battery->get_property = ds2782_battery_get_property; + battery->external_power_changed = NULL; +} + +static int ds2782_battery_remove(struct i2c_client *client) +{ + struct ds2782_info *info = i2c_get_clientdata(client); + + power_supply_unregister(&info->battery); + kfree(info->battery.name); + + mutex_lock(&battery_lock); + idr_remove(&battery_id, info->id); + mutex_unlock(&battery_lock); + + i2c_set_clientdata(client, info); + + kfree(info); + return 0; +} + +static int ds2782_battery_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ds2782_info *info; + int ret; + int num; + + /* Get an ID for this battery */ + ret = idr_pre_get(&battery_id, GFP_KERNEL); + if (ret == 0) { + ret = -ENOMEM; + goto 
fail_id; + } + + mutex_lock(&battery_lock); + ret = idr_get_new(&battery_id, client, &num); + mutex_unlock(&battery_lock); + if (ret < 0) + goto fail_id; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto fail_info; + } + + info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num); + if (!info->battery.name) { + ret = -ENOMEM; + goto fail_name; + } + + i2c_set_clientdata(client, info); + info->client = client; + ds2782_power_supply_init(&info->battery); + + ret = power_supply_register(&client->dev, &info->battery); + if (ret) { + dev_err(&client->dev, "failed to register battery\n"); + goto fail_register; + } + + return 0; + +fail_register: + kfree(info->battery.name); +fail_name: + i2c_set_clientdata(client, info); + kfree(info); +fail_info: + mutex_lock(&battery_lock); + idr_remove(&battery_id, num); + mutex_unlock(&battery_lock); +fail_id: + return ret; +} + +static const struct i2c_device_id ds2782_id[] = { + {"ds2782", 0}, + {}, +}; + +static struct i2c_driver ds2782_battery_driver = { + .driver = { + .name = "ds2782-battery", + }, + .probe = ds2782_battery_probe, + .remove = ds2782_battery_remove, + .id_table = ds2782_id, +}; + +static int __init ds2782_init(void) +{ + return i2c_add_driver(&ds2782_battery_driver); +} +module_init(ds2782_init); + +static void __exit ds2782_exit(void) +{ + i2c_del_driver(&ds2782_battery_driver); +} +module_exit(ds2782_exit); + +MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>"); +MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 5fbca2681ba..58e419299cd 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ +#include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> @@ -35,6 +36,7 @@ #define BAT_STAT_AC 0x10 #define BAT_STAT_CHARGING 0x20 #define BAT_STAT_DISCHARGING 0x40 +#define BAT_STAT_TRICKLE 0x80 #define BAT_ERR_INFOFAIL 0x02 #define BAT_ERR_OVERVOLTAGE 0x04 @@ -89,7 +91,7 @@ static char bat_serial[17]; /* Ick */ static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte) { if (olpc_platform_info.ecver > 0x44) { - if (ec_byte & BAT_STAT_CHARGING) + if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE)) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (ec_byte & BAT_STAT_DISCHARGING) val->intval = POWER_SUPPLY_STATUS_DISCHARGING; @@ -219,7 +221,8 @@ static int olpc_bat_get_property(struct power_supply *psy, It doesn't matter though -- the EC will return the last-known information, and it's as if we just ran that _little_ bit faster and managed to read it out before the battery went away. 
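The olpc_battery change above folds the new trickle-charge bit into the status decision, so a battery that is only trickle-charging is reported as charging instead of falling through. A small check using the status bits defined earlier in this hunk:

#include <stdio.h>

#define BAT_STAT_CHARGING    0x20
#define BAT_STAT_DISCHARGING 0x40
#define BAT_STAT_TRICKLE     0x80

/* Mirrors the status decision after this patch: trickle counts as charging. */
static const char *bat_status(unsigned char ec_byte)
{
        if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
                return "charging";
        if (ec_byte & BAT_STAT_DISCHARGING)
                return "discharging";
        return "not charging";
}

int main(void)
{
        printf("0x80 -> %s\n", bat_status(0x80));   /* trickle-only: now "charging" */
        printf("0x20 -> %s\n", bat_status(0x20));   /* normal charge                */
        printf("0x40 -> %s\n", bat_status(0x40));   /* discharging                  */
        return 0;
}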
*/ - if (!(ec_byte & BAT_STAT_PRESENT) && psp != POWER_SUPPLY_PROP_PRESENT) + if (!(ec_byte & (BAT_STAT_PRESENT | BAT_STAT_TRICKLE)) && + psp != POWER_SUPPLY_PROP_PRESENT) return -ENODEV; switch (psp) { @@ -229,7 +232,8 @@ static int olpc_bat_get_property(struct power_supply *psy, return ret; break; case POWER_SUPPLY_PROP_PRESENT: - val->intval = !!(ec_byte & BAT_STAT_PRESENT); + val->intval = !!(ec_byte & (BAT_STAT_PRESENT | + BAT_STAT_TRICKLE)); break; case POWER_SUPPLY_PROP_HEALTH: @@ -334,21 +338,21 @@ static ssize_t olpc_bat_eeprom_read(struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { uint8_t ec_byte; - int ret, end; + int ret; + int i; if (off >= EEPROM_SIZE) return 0; if (off + count > EEPROM_SIZE) count = EEPROM_SIZE - off; - end = EEPROM_START + off + count; - for (ec_byte = EEPROM_START + off; ec_byte < end; ec_byte++) { - ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, - &buf[ec_byte - EEPROM_START], 1); + for (i = 0; i < count; i++) { + ec_byte = EEPROM_START + off + i; + ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &buf[i], 1); if (ret) { - printk(KERN_ERR "olpc-battery: EC command " - "EC_BAT_EEPROM @ 0x%x failed -" - " %d!\n", ec_byte, ret); + pr_err("olpc-battery: " + "EC_BAT_EEPROM cmd @ 0x%x failed - %d!\n", + ec_byte, ret); return -EIO; } } diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 8bde92126d3..b787335a841 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -33,14 +33,14 @@ static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) { - return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, + return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent), pdata->batt_aux) * pdata->batt_mult / pdata->batt_div; } static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) { - return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, + return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent), pdata->temp_aux) * pdata->temp_mult / pdata->temp_div; } diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index ac8cc8cea1e..fea17e7805e 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps) } pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, "pps%d", pps->id); - if (err) + if (IS_ERR(pps->dev)) goto del_cdev; dev_set_drvdata(pps->dev, pps); diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index aafd3e6ebb0..a118eb0f1e6 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -1,8 +1,8 @@ /* * Blackfin On-Chip Real Time Clock Driver - * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789] + * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x * - * Copyright 2004-2008 Analog Devices Inc. + * Copyright 2004-2009 Analog Devices Inc. 
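The pps.c fix above is a reminder that device_create() reports failure through the returned pointer (ERR_PTR-encoded), not through a separate errno variable - the old code tested a stale err. A userspace imitation of the ERR_PTR()/IS_ERR() convention to show what the corrected check looks at; the real macros live in include/linux/err.h:

#include <stdio.h>

/* Simplified imitations of the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for device_create(): failures come back encoded in the pointer. */
static void *fake_device_create(int fail)
{
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
        void *dev = fake_device_create(1);

        if (IS_ERR(dev))                     /* the check pps.c now performs */
                printf("device_create failed: %ld\n", (long)dev);
        return 0;
}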
* * Enter bugs at http://blackfin.uclinux.org/ * @@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) struct bfin_rtc *rtc; struct device *dev = &pdev->dev; int ret = 0; - unsigned long timeout; + unsigned long timeout = jiffies + HZ; dev_dbg_stamp(dev); @@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rtc); device_init_wakeup(dev, 1); + /* Register our RTC with the RTC framework */ + rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, + THIS_MODULE); + if (unlikely(IS_ERR(rtc->rtc_dev))) { + ret = PTR_ERR(rtc->rtc_dev); + goto err; + } + /* Grab the IRQ and init the hardware */ ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); if (unlikely(ret)) - goto err; + goto err_reg; /* sometimes the bootloader touched things, but the write complete was not * enabled, so let's just do a quick timeout here since the IRQ will not fire ... */ - timeout = jiffies + HZ; while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) if (time_after(jiffies, timeout)) break; bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); bfin_write_RTC_SWCNT(0); - /* Register our RTC with the RTC framework */ - rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE); - if (unlikely(IS_ERR(rtc->rtc_dev))) { - ret = PTR_ERR(rtc->rtc_dev); - goto err_irq; - } - return 0; - err_irq: - free_irq(IRQ_RTC, dev); - err: +err_reg: + rtc_device_unregister(rtc->rtc_dev); +err: kfree(rtc); return ret; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 23e10b6263d..f7a4701bf86 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1174,23 +1174,34 @@ static struct platform_driver cmos_platform_driver = { } }; +#ifdef CONFIG_PNP +static bool pnp_driver_registered; +#endif +static bool platform_driver_registered; + static int __init cmos_init(void) { int retval = 0; #ifdef CONFIG_PNP - pnp_register_driver(&cmos_pnp_driver); + retval = pnp_register_driver(&cmos_pnp_driver); + if (retval == 0) + pnp_driver_registered = true; #endif - if (!cmos_rtc.dev) + if (!cmos_rtc.dev) { retval = platform_driver_probe(&cmos_platform_driver, cmos_platform_probe); + if (retval == 0) + platform_driver_registered = true; + } if (retval == 0) return 0; #ifdef CONFIG_PNP - pnp_unregister_driver(&cmos_pnp_driver); + if (pnp_driver_registered) + pnp_unregister_driver(&cmos_pnp_driver); #endif return retval; } @@ -1199,9 +1210,11 @@ module_init(cmos_init); static void __exit cmos_exit(void) { #ifdef CONFIG_PNP - pnp_unregister_driver(&cmos_pnp_driver); + if (pnp_driver_registered) + pnp_unregister_driver(&cmos_pnp_driver); #endif - platform_driver_unregister(&cmos_platform_driver); + if (platform_driver_registered) + platform_driver_unregister(&cmos_platform_driver); } module_exit(cmos_exit); diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 32b27739ec2..713f7bf5afb 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -283,7 +283,7 @@ static void ds1374_work(struct work_struct *work) stat = i2c_smbus_read_byte_data(client, DS1374_REG_SR); if (stat < 0) - return; + goto unlock; if (stat & DS1374_REG_SR_AF) { stat &= ~DS1374_REG_SR_AF; @@ -302,7 +302,7 @@ static void ds1374_work(struct work_struct *work) out: if (!ds1374->exiting) enable_irq(client->irq); - +unlock: mutex_unlock(&ds1374->mutex); } diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index f11297aff85..2c839d0d21b 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ 
b/drivers/rtc/rtc-vr41xx.c @@ -1,7 +1,7 @@ /* * Driver for NEC VR4100 series Real Time Clock unit. * - * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,7 +33,7 @@ #include <asm/io.h> #include <asm/uaccess.h> -MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); +MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_DESCRIPTION("NEC VR4100 series RTC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 74983666865..3f62dd50bbb 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2135,9 +2135,9 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) struct dasd_device *base; block = bdev->bd_disk->private_data; - base = block->base; if (!block) return -ENODEV; + base = block->base; if (!base->discipline || !base->discipline->fill_geometry) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index f8b1f04f26b..c11770f5b36 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1696,8 +1696,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, DBF_DEV_EVENT(DBF_ERR, device, "%s", "unsolicited interrupt received " "(sense available)"); - device->discipline->dump_sense_dbf(device, NULL, irb, - "unsolicited"); + device->discipline->dump_sense_dbf(device, irb, "unsolicited"); } dasd_schedule_device_bh(device); @@ -2941,42 +2940,20 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) } static void -dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, - struct irb *irb, char *reason) +dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, + char *reason) { u64 *sense; - int sl; - struct tsb *tsb; - sense = NULL; - tsb = NULL; - if (req && scsw_is_tm(&req->irb.scsw)) { - if (irb->scsw.tm.tcw) - tsb = tcw_get_tsb( - (struct tcw *)(unsigned long)irb->scsw.tm.tcw); - if (tsb && (irb->scsw.tm.fcxs == 0x01)) { - switch (tsb->flags & 0x07) { - case 1: /* tsa_iostat */ - sense = (u64 *)tsb->tsa.iostat.sense; - break; - case 2: /* ts_ddpc */ - sense = (u64 *)tsb->tsa.ddpc.sense; - break; - case 3: /* tsa_intrg */ - break; - } - } - } else { - if (irb->esw.esw0.erw.cons) - sense = (u64 *)irb->ecw; - } + sense = (u64 *) dasd_get_sense(irb); if (sense) { - for (sl = 0; sl < 4; sl++) { - DBF_DEV_EVENT(DBF_EMERG, device, - "%s: %016llx %016llx %016llx %016llx", - reason, sense[0], sense[1], sense[2], - sense[3]); - } + DBF_DEV_EVENT(DBF_EMERG, device, + "%s: %s %02x%02x%02x %016llx %016llx %016llx " + "%016llx", reason, + scsw_is_tm(&irb->scsw) ? 
"t" : "c", + scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), + scsw_dstat(&irb->scsw), sense[0], sense[1], + sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d970ce2814b..cb8f9cef742 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -172,7 +172,7 @@ dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb) device = cqr->startdev; /* dump sense data to s390 debugfeature*/ if (device->discipline && device->discipline->dump_sense_dbf) - device->discipline->dump_sense_dbf(device, cqr, irb, "log"); + device->discipline->dump_sense_dbf(device, irb, "log"); } EXPORT_SYMBOL(dasd_log_sense_dbf); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index e21ee735f92..31849ad5e59 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -241,7 +241,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, /* check for unsolicited interrupts */ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "unsolicited interrupt received"); - device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited"); + device->discipline->dump_sense_dbf(device, irb, "unsolicited"); dasd_schedule_device_bh(device); return; }; @@ -444,17 +444,20 @@ dasd_fba_fill_info(struct dasd_device * device, } static void -dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, - struct irb *irb, char *reason) +dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb, + char *reason) { - int sl; - if (irb->esw.esw0.erw.cons) { - for (sl = 0; sl < 4; sl++) { - DBF_DEV_EVENT(DBF_EMERG, device, - "%s: %08x %08x %08x %08x", - reason, irb->ecw[8 * 0], irb->ecw[8 * 1], - irb->ecw[8 * 2], irb->ecw[8 * 3]); - } + u64 *sense; + + sense = (u64 *) dasd_get_sense(irb); + if (sense) { + DBF_DEV_EVENT(DBF_EMERG, device, + "%s: %s %02x%02x%02x %016llx %016llx %016llx " + "%016llx", reason, + scsw_is_tm(&irb->scsw) ? 
"t" : "c", + scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), + scsw_dstat(&irb->scsw), sense[0], sense[1], + sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fd63b2f2bda..b699ca356ac 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -284,8 +284,7 @@ struct dasd_discipline { dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, struct irb *); - void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *, - struct irb *, char *); + void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); void (*handle_unsolicited_interrupt) (struct dasd_device *, struct irb *); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 4ce3f72ee1c..df918ef2796 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -16,6 +16,7 @@ #include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> +#include <linux/smp_lock.h> #include <asm/ccwdev.h> #include <asm/cmb.h> diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 016f9e9d259..d34617682a6 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -964,7 +964,8 @@ static int dcssblk_freeze(struct device *dev) break; } if (rc) - pr_err("Suspend failed because device %s is writeable.\n", + pr_err("Suspending the system failed because DCSS device %s " + "is writable\n", dev_info->segment_name); return rc; } @@ -987,8 +988,8 @@ static int dcssblk_restore(struct device *dev) goto out_panic; } if (start != entry->start || end != entry->end) { - pr_err("Mismatch of start / end address after " - "resuming device %s\n", + pr_err("The address range of DCSS %s changed " + "while the system was suspended\n", entry->segment_name); goto out_panic; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 2e9e1ecd6d8..db442cd6621 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -443,7 +443,7 @@ fail: */ static void xpram_resume_error(const char *message) { - pr_err("Resume error: %s\n", message); + pr_err("Resuming the system failed: %s\n", message); panic("xpram resume error\n"); } diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 7892550d793..3234e90bd7f 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp) goto out_path; } filp->private_data = monpriv; - dev_set_drvdata(&monreader_device, monpriv); + dev_set_drvdata(monreader_device, monpriv); unlock_kernel(); return nonseekable_open(inode, filp); @@ -463,7 +463,7 @@ static struct miscdevice mon_dev = { *****************************************************************************/ static int monreader_freeze(struct device *dev) { - struct mon_private *monpriv = dev_get_drvdata(&dev); + struct mon_private *monpriv = dev_get_drvdata(dev); int rc; if (!monpriv) diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index 85f491ea929..7a7bfc947d9 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h @@ -92,5 +92,10 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short); void sclp_set_htab(struct sclp_buffer *, unsigned short); int sclp_chars_in_buffer(struct sclp_buffer *); +#ifdef CONFIG_SCLP_CONSOLE void sclp_console_pm_event(enum 
sclp_pm_event sclp_pm_event); +#else +static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { } +#endif + #endif /* __SCLP_RW_H__ */ diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index cb7854c10c0..f2bc287b69e 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -250,14 +250,14 @@ static int vmwdt_resume(void) static int vmwdt_suspend(void) { if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { - pr_err("The watchdog is in use. " - "This prevents hibernation or suspend.\n"); + pr_err("The system cannot be suspended while the watchdog" + " is in use\n"); return NOTIFY_BAD; } if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { clear_bit(VMWDT_OPEN, &vmwdt_is_open); - pr_err("The watchdog is running. " - "This prevents hibernation or suspend.\n"); + pr_err("The system cannot be suspended while the watchdog" + " is running\n"); return NOTIFY_BAD; } return NOTIFY_DONE; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 3c57c1a18bb..d593bc76afe 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -772,10 +772,8 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) cdev = io_subchannel_allocate_dev(sch); if (!IS_ERR(cdev)) { ret = io_subchannel_initialize_dev(sch, cdev); - if (ret) { - kfree(cdev); + if (ret) cdev = ERR_PTR(ret); - } } return cdev; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 727a809636d..ed3dcdea7fe 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1145,12 +1145,17 @@ ap_config_timeout(unsigned long ptr) */ static inline void ap_schedule_poll_timer(void) { + ktime_t hr_time; if (ap_using_interrupts() || ap_suspend_flag) return; if (hrtimer_is_queued(&ap_poll_timer)) return; - hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), - HRTIMER_MODE_ABS); + if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { + hr_time = ktime_set(0, poll_timeout); + hrtimer_forward_now(&ap_poll_timer, hr_time); + hrtimer_restart(&ap_poll_timer); + } + return; } /** diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 8030e25152f..c75d6f35cb5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -553,40 +553,35 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, _zfcp_erp_unit_reopen(unit, clear, id, ref); } -static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) +static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) { - struct zfcp_adapter *adapter = act->adapter; - struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; - u32 status = act->status; - - /* initiate follow-up actions depending on success of finished action */ switch (act->action) { - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL); - else - _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL); + _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL); - else - _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL); + _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_PORT: - if (status == ZFCP_ERP_SUCCEEDED) - _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL); - else - _zfcp_erp_port_forced_reopen(port, 0, 
"ersfa_6", NULL); + _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (status != ZFCP_ERP_SUCCEEDED) - _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL); + _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); + break; + } +} + +static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) +{ + switch (act->action) { + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); + break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); + break; + case ZFCP_ERP_ACTION_REOPEN_PORT: + _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); break; } } @@ -801,7 +796,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) return ZFCP_ERP_FAILED; case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: - if (status & ZFCP_STATUS_PORT_PHYS_OPEN) + if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN)) return ZFCP_ERP_SUCCEEDED; } return ZFCP_ERP_FAILED; @@ -853,11 +848,17 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) gid_pn_work); retval = zfcp_fc_ns_gid_pn(&port->erp_action); - if (retval == -ENOMEM) - zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM); - port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; - if (retval) - zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); + if (!retval) { + port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; + goto out; + } + if (retval == -ENOMEM) { + zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); + goto out; + } + /* all other error condtions */ + zfcp_erp_notify(&port->erp_action, 0); +out: zfcp_port_put(port); } @@ -1289,7 +1290,10 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) retval = zfcp_erp_strategy_statechange(erp_action, retval); if (retval == ZFCP_ERP_EXIT) goto unlock; - zfcp_erp_strategy_followup_actions(erp_action); + if (retval == ZFCP_ERP_SUCCEEDED) + zfcp_erp_strategy_followup_success(erp_action); + if (retval == ZFCP_ERP_FAILED) + zfcp_erp_strategy_followup_failed(erp_action); unlock: write_unlock(&adapter->erp_lock); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 2f0705d76b7..47daebfa7e5 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -79,11 +79,9 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) mutex_unlock(&wka_port->mutex); - wait_event_timeout( - wka_port->completion_wq, - wka_port->status == ZFCP_WKA_PORT_ONLINE || - wka_port->status == ZFCP_WKA_PORT_OFFLINE, - HZ >> 1); + wait_event(wka_port->completion_wq, + wka_port->status == ZFCP_WKA_PORT_ONLINE || + wka_port->status == ZFCP_WKA_PORT_OFFLINE); if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { atomic_inc(&wka_port->refcount); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c57658f3d34..47795fbf081 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) zfcp_fsf_sbal_check(adapter), 5 * HZ); if (ret > 0) return 0; - if (!ret) + if (!ret) { atomic_inc(&adapter->qdio_outb_full); + /* assume hanging outbound queue, try queue recovery */ + zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); + } spin_lock_bh(&adapter->req_q_lock); return -EIO; @@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, req = zfcp_fsf_alloc_qtcb(pool); if (unlikely(!req)) - return ERR_PTR(-EIO); + return ERR_PTR(-ENOMEM); if (adapter->req_no 
== 0) adapter->req_no++; @@ -1010,6 +1013,23 @@ skip_fsfstatus: send_ct->handler(send_ct->handler_data); } +static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, + struct scatterlist *sg_req, + struct scatterlist *sg_resp) +{ + sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; + sbale[2].addr = sg_virt(sg_req); + sbale[2].length = sg_req->length; + sbale[3].addr = sg_virt(sg_resp); + sbale[3].length = sg_resp->length; + sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; +} + +static int zfcp_fsf_one_sbal(struct scatterlist *sg) +{ + return sg_is_last(sg) && sg->length <= PAGE_SIZE; +} + static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_req, struct scatterlist *sg_resp, @@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, int bytes; if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { - if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE || - !sg_is_last(sg_req) || !sg_is_last(sg_resp)) + if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp)) return -EOPNOTSUPP; - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; - sbale[2].addr = sg_virt(sg_req); - sbale[2].length = sg_req->length; - sbale[3].addr = sg_virt(sg_resp); - sbale[3].length = sg_resp->length; - sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); + return 0; + } + + /* use single, unchained SBAL if it can hold the request */ + if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) { + zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); return 0; } bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_req, max_sbals); if (bytes <= 0) - return -ENOMEM; + return -EIO; req->qtcb->bottom.support.req_buf_length = bytes; req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_resp, max_sbals); if (bytes <= 0) - return -ENOMEM; + return -EIO; req->qtcb->bottom.support.resp_buf_length = bytes; return 0; @@ -1607,10 +1627,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) case FSF_ACCESS_DENIED: wka_port->status = ZFCP_WKA_PORT_OFFLINE; break; - case FSF_PORT_ALREADY_OPEN: - break; case FSF_GOOD: wka_port->handle = header->port_handle; + /* fall through */ + case FSF_PORT_ALREADY_OPEN: wka_port->status = ZFCP_WKA_PORT_ONLINE; } out: @@ -1731,15 +1751,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_access_denied_port(req, port); break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(port, "fscpph2", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); list_for_each_entry(unit, &port->unit_list_head, list) atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + zfcp_erp_port_boxed(port, "fscpph2", req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; + break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { @@ -2541,7 +2562,6 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); if (bytes != ZFCP_CFDC_MAX_SIZE) { - retval = -ENOMEM; zfcp_fsf_req_free(req); goto out; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 967ede73f4c..6925a178468 100644 --- 
a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -167,20 +167,21 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) struct zfcp_unit *unit = scpnt->device->hostdata; struct zfcp_fsf_req *old_req, *abrt_req; unsigned long flags; - unsigned long old_req_id = (unsigned long) scpnt->host_scribble; + unsigned long old_reqid = (unsigned long) scpnt->host_scribble; int retval = SUCCESS; int retry = 3; + char *dbf_tag; /* avoid race condition between late normal completion and abort */ write_lock_irqsave(&adapter->abort_lock, flags); spin_lock(&adapter->req_list_lock); - old_req = zfcp_reqlist_find(adapter, old_req_id); + old_req = zfcp_reqlist_find(adapter, old_reqid); spin_unlock(&adapter->req_list_lock); if (!old_req) { write_unlock_irqrestore(&adapter->abort_lock, flags); zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, - old_req_id); + old_reqid); return FAILED; /* completion could be in progress */ } old_req->data = NULL; @@ -189,7 +190,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) write_unlock_irqrestore(&adapter->abort_lock, flags); while (retry--) { - abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit); + abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); if (abrt_req) break; @@ -197,7 +198,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, - old_req_id); + old_reqid); return SUCCESS; } } @@ -208,13 +209,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) - zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0); + dbf_tag = "okay"; else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) - zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0); + dbf_tag = "lte2"; else { - zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0); + dbf_tag = "fail"; retval = FAILED; } + zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); zfcp_fsf_req_free(abrt_req); return retval; } @@ -534,6 +536,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) struct fc_rport_identifiers ids; struct fc_rport *rport; + if (port->rport) + return; + ids.node_name = port->wwnn; ids.port_name = port->wwpn; ids.port_id = port->d_id; @@ -557,8 +562,10 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) { struct fc_rport *rport = port->rport; - if (rport) + if (rport) { fc_remote_port_delete(rport); + port->rport = NULL; + } } void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3e51e64d110..0fe5cce818c 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -494,9 +494,14 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, struct Scsi_Host *scsi_host = class_to_shost(dev); struct zfcp_adapter *adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; + u64 util; + + spin_lock_bh(&adapter->qdio_stat_lock); + util = adapter->req_q_util; + spin_unlock_bh(&adapter->qdio_stat_lock); return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), - (unsigned long long)adapter->req_q_util); + (unsigned long long)util); } static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index 
15dab96d05e..7c815d3327f 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c @@ -537,8 +537,12 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp) } if (temp_index != 0 && fan_index != 0) { kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); - if (IS_ERR(kenvctrld_task)) - return PTR_ERR(kenvctrld_task); + if (IS_ERR(kenvctrld_task)) { + int err = PTR_ERR(kenvctrld_task); + + kenvctrld_task = NULL; + return err; + } } return 0; @@ -561,7 +565,8 @@ void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) struct bbc_cpu_temperature *tp, *tpos; struct bbc_fan_control *fp, *fpos; - kthread_stop(kenvctrld_task); + if (kenvctrld_task) + kthread_stop(kenvctrld_task); list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { list_del(&tp->bp_list); diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 0471f880048..4240b05aef6 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -2826,8 +2826,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) */ local_irq_restore(flags); - printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" - KERN_INFO " before abortion\n", HOSTNO); + printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); /* Maybe it is sufficient just to release the ST-DMA lock... (if * possible at all) At least, we should check if the lock could be diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild index 25a2032bfa2..70d060b7ff4 100644 --- a/drivers/scsi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgb3i/Kbuild @@ -1,4 +1,4 @@ -EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 +EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 74369a3f963..c399f485aa7 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -13,6 +13,7 @@ #include <linux/inet.h> #include <linux/crypto.h> +#include <linux/if_vlan.h> #include <net/dst.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> @@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) struct cxgb3i_adapter *snic; int i; + if (ndev->priv_flags & IFF_802_1Q_VLAN) + ndev = vlan_dev_real_dev(ndev); + read_lock(&cxgb3i_snic_rwlock); list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { for (i = 0; i < snic->hba_cnt; i++) { diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index a84072865fc..2c266c01dc5 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev, * limitation for the device. Try 40-bit first, and * fail to 32-bit. 
*/ - err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "No usable DMA configuration " "aborting\n"); goto err_out_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 32-bit DMA " @@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 40-bit DMA " diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index eabf3650285..bfc996971b8 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, - u32 sg_count) + int sg_count) { struct scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); @@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, char msg[2]; if (sg_count) { - BUG_ON(sg_count < 0); - BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT); - /* For each SGE, create a device desc entry */ desc = io_req->sgl_list; for_each_sg(scsi_sglist(sc), sg, sg_count, i) { @@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) struct fnic *fnic; struct vnic_wq_copy *wq; int ret; - u32 sg_count; + int sg_count; unsigned long flags; unsigned long ptr; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 869a11bdccb..9928704e235 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) MAX_INDIRECT_BUFS); hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } + + if (hostdata->madapter_info.os_type == 3) { + enable_fast_fail(hostdata); + return; + } } - enable_fast_fail(hostdata); + send_srp_login(hostdata); } /** diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 2bc22be5f84..145ab9ba55e 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -415,9 +415,9 @@ static void fc_exch_timeout(struct work_struct *work) e_stat = ep->esb_stat; if (e_stat & ESB_ST_COMPLETE) { ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; + spin_unlock_bh(&ep->ex_lock); if (e_stat & ESB_ST_REC_QUAL) fc_exch_rrq(ep); - spin_unlock_bh(&ep->ex_lock); goto done; } else { resp = ep->resp; @@ -1624,14 +1624,14 @@ static void fc_exch_rrq(struct fc_exch *ep) struct fc_lport *lp; struct fc_els_rrq *rrq; struct fc_frame *fp; - struct fc_seq *rrq_sp; u32 did; lp = ep->lp; fp = fc_frame_alloc(lp, sizeof(*rrq)); if (!fp) - return; + goto retry; + rrq = fc_frame_payload_get(fp, sizeof(*rrq)); memset(rrq, 0, sizeof(*rrq)); rrq->rrq_cmd = ELS_RRQ; @@ -1647,13 +1647,20 @@ static void fc_exch_rrq(struct fc_exch *ep) fc_host_port_id(lp->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, - lp->e_d_tov); - if (!rrq_sp) { - ep->esb_stat |= ESB_ST_REC_QUAL; - fc_exch_timer_set_locked(ep, 
ep->r_a_tov); + if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) + return; + +retry: + spin_lock_bh(&ep->ex_lock); + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) { + spin_unlock_bh(&ep->ex_lock); + /* drop hold for rec qual */ + fc_exch_release(ep); return; } + ep->esb_stat |= ESB_ST_REC_QUAL; + fc_exch_timer_set_locked(ep, ep->r_a_tov); + spin_unlock_bh(&ep->ex_lock); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 716cc344c5d..a751f6230c2 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1974,10 +1974,10 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * good and have never sent us a successful tmf response * then sent more data for the cmd. */ - spin_lock(&session->lock); + spin_lock_bh(&session->lock); fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; - spin_unlock(&session->lock); + spin_unlock_bh(&session->lock); iscsi_start_tx(conn); goto success_unlocked; case TMF_TIMEDOUT: diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 54fa1e42dc4..b3381959acc 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -766,6 +766,7 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); + phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return 0; } @@ -945,11 +946,21 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; - + /* + * Due to races, the phy might not get added to the + * wide port, so we add the phy to the wide port here. + */ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == - SAS_ADDR(child->sas_addr)) + SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; + res = sas_ex_join_wide_port(dev, i); + if (!res) + SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", + i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); + + } } + res = 0; } return res; @@ -1598,7 +1609,7 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev, } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, - int from_phy) + int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; @@ -1611,7 +1622,9 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, if (res) goto out; else if (phy_change_count != ex->ex_phy[i].phy_change_count) { - ex->ex_phy[i].phy_change_count = phy_change_count; + if (update) + ex->ex_phy[i].phy_change_count = + phy_change_count; *phy_id = i; return 0; } @@ -1653,31 +1666,52 @@ out: kfree(rg_req); return res; } +/** + * sas_find_bcast_dev - find the device that issued BROADCAST(CHANGE). + * @dev: domain device to be checked. + * @src_dev: the device which originated BROADCAST(CHANGE). + * + * Add self-configuring expander support. Suppose two expanders are cascaded: + * when the first-level expander is self-configuring and disks are hotplugged + * into the second-level expander, BROADCAST(CHANGE) is originated not only + * in the second-level expander but also in the first-level expander (see SAS + * protocol SAS 2r-14, 7.11 for details). That is, the expander change count + * increments at least once in both expanders, but the phy whose change count + * has changed belongs to the source device we are looking for. 
+ */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; + int phy_id = -1; int res; + struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; - if (ex_change_count != -1 && - ex_change_count != ex->ex_change_count) { - *src_dev = dev; - ex->ex_change_count = ex_change_count; - } else { - struct domain_device *ch; - - list_for_each_entry(ch, &ex->children, siblings) { - if (ch->dev_type == EDGE_DEV || - ch->dev_type == FANOUT_DEV) { - res = sas_find_bcast_dev(ch, src_dev); - if (src_dev) - return res; - } + if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { + /* Just detect if this expander phys phy change count changed, + * in order to determine if this expander originate BROADCAST, + * and do not update phy change count field in our structure. + */ + res = sas_find_bcast_phy(dev, &phy_id, 0, false); + if (phy_id != -1) { + *src_dev = dev; + ex->ex_change_count = ex_change_count; + SAS_DPRINTK("Expander phy change count has changed\n"); + return res; + } else + SAS_DPRINTK("Expander phys DID NOT change\n"); + } + list_for_each_entry(ch, &ex->children, siblings) { + if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + res = sas_find_bcast_dev(ch, src_dev); + if (src_dev) + return res; } } out: @@ -1700,24 +1734,26 @@ static void sas_unregister_ex_tree(struct domain_device *dev) } static void sas_unregister_devs_sas_addr(struct domain_device *parent, - int phy_id) + int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n; - - list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { - if (SAS_ADDR(child->sas_addr) == - SAS_ADDR(phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) - sas_unregister_ex_tree(child); - else - sas_unregister_dev(child); - break; + if (last) { + list_for_each_entry_safe(child, n, + &ex_dev->children, siblings) { + if (SAS_ADDR(child->sas_addr) == + SAS_ADDR(phy->attached_sas_addr)) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + sas_unregister_ex_tree(child); + else + sas_unregister_dev(child); + break; + } } + sas_disable_routing(parent, phy->attached_sas_addr); } - sas_disable_routing(parent, phy->attached_sas_addr); memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_port_delete_phy(phy->port, phy->phy); if (phy->port->num_phys == 0) @@ -1770,15 +1806,31 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; - int res; + bool found = false; + int res, i; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) goto out; + /* to support the wide port inserted */ + for (i = 0; i < dev->ex_dev.num_phys; i++) { + struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; + if (i == phy_id) + continue; + if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == + SAS_ADDR(ex_phy->attached_sas_addr)) { + found = true; + break; + } + } + if (found) { + sas_ex_join_wide_port(dev, phy_id); + return 0; + } res = sas_ex_discover_devices(dev, phy_id); - if (res) + if (!res) goto out; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == @@ -1793,7 +1845,7 @@ out: return res; } -static int sas_rediscover_dev(struct 
domain_device *dev, int phy_id) +static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; @@ -1804,11 +1856,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id) switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; - sas_unregister_devs_sas_addr(dev, phy_id); + sas_unregister_devs_sas_addr(dev, phy_id, last); goto out; break; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; - sas_unregister_devs_sas_addr(dev, phy_id); + sas_unregister_devs_sas_addr(dev, phy_id, last); goto out; break; case SMP_RESP_FUNC_ACC: break; @@ -1816,7 +1868,7 @@ if (SAS_ADDR(attached_sas_addr) == 0) { phy->phy_state = PHY_EMPTY; - sas_unregister_devs_sas_addr(dev, phy_id); + sas_unregister_devs_sas_addr(dev, phy_id, last); } else if (SAS_ADDR(attached_sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", @@ -1828,12 +1880,27 @@ out: return res; } +/** + * sas_rediscover - revalidate the domain. + * @dev: domain device to be checked. + * @phy_id: the phy id to be checked. + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * The discover process only interrogates devices in order to discover the + * domain. When a device is unplugged, we unregister it only if it is the + * last phy in the port; the other phys in the port are simply deleted from + * the port. When a device is inserted, we do discovery only for the first + * phy; the other phys in the port are just added to the port to form the + * wide port. + */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; + bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); @@ -1848,13 +1915,13 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); - goto out; + last = false; + break; } } - res = sas_rediscover_dev(dev, phy_id); + res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); -out: return res; } @@ -1881,7 +1948,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) do { phy_id = -1; - res = sas_find_bcast_phy(dev, &phy_id, i); + res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e6ac59c023f..fe8b74c706d 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -56,7 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy) } } - /* find a port */ + /* see if the phy should be part of a wide port */ spin_lock_irqsave(&sas_ha->phy_port_lock, flags); for (i = 0; i < sas_ha->num_phys; i++) { port = sas_ha->sas_port[i]; @@ -69,12 +69,23 @@ static void sas_form_port(struct asd_sas_phy *phy) SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, port->id); break; - } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) { - memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE); - break; } spin_unlock(&port->phy_list_lock); } + /* The phy does not match any 
existing port, create a new one */ + if (i == sas_ha->num_phys) { + for (i = 0; i < sas_ha->num_phys; i++) { + port = sas_ha->sas_port[i]; + spin_lock(&port->phy_list_lock); + if (*(u64 *)port->sas_addr == 0 + && port->num_phys == 0) { + memcpy(port->sas_addr, phy->sas_addr, + SAS_ADDR_SIZE); + break; + } + spin_unlock(&port->phy_list_lock); + } + } if (i >= sas_ha->num_phys) { printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index b12ad7c7c67..18735b39b3d 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -75,8 +75,9 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd * int i; printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); for (i = 0; i < cmd->cmd_len; ++i) - printk(" %.2x", cmd->cmnd[i]); - printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", + printk(KERN_CONT " %.2x", cmd->cmnd[i]); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); } #endif diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index f3da592f7bc..35a13867495 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work) spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); } +/** + * mpt2sas_base_start_watchdog - start the fault_reset_work_q + * @ioc: pointer to scsi command object + * Context: sleep. + * + * Return nothing. + */ +void +mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + + /* initialize fault polling */ + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q + * @ioc: pointer to scsi command object + * Context: sleep. + * + * Return nothing. 
+ */ +void +mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _base_sas_ioc_info - verbose translation of the ioc status @@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) if (sas_loginfo.dw.bus_type != 3 /*SAS*/) return; + /* each nexus loss loginfo */ + if (log_info == 0x31170000) + return; + /* eat the loginfos associated with task aborts */ if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == 0x31140000 || log_info == 0x31130000)) @@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) } } - pci_set_drvdata(pdev, ioc->shost); _base_mask_interrupts(ioc); r = _base_enable_msix(ioc); if (r) @@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) ioc->pci_irq = -1; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return r; } @@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) ioc->chip_phys = 0; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return; } @@ -3205,7 +3264,6 @@ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) { int r, i; - unsigned long flags; dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, __func__)); @@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) if (r) return r; + pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); if (r) goto out_free_resources; @@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) if (r) goto out_free_resources; - /* initialize fault polling */ - INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); - snprintf(ioc->fault_reset_work_q_name, - sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); - ioc->fault_reset_work_q = - create_singlethread_workqueue(ioc->fault_reset_work_q_name); - if (!ioc->fault_reset_work_q) { - printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", - ioc->name, __func__, __LINE__); - goto out_free_resources; - } - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->fault_reset_work_q) - queue_delayed_work(ioc->fault_reset_work_q, - &ioc->fault_reset_work, - msecs_to_jiffies(FAULT_POLLING_INTERVAL)); - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + mpt2sas_base_start_watchdog(ioc); return 0; out_free_resources: @@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) ioc->remove_host = 1; mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); kfree(ioc->tm_cmds.reply); kfree(ioc->transport_cmds.reply); kfree(ioc->config_cmds.reply); @@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) { - unsigned long flags; - struct workqueue_struct *wq; dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, __func__)); - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - wq = ioc->fault_reset_work_q; - ioc->fault_reset_work_q = NULL; - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, 
flags); - if (!cancel_delayed_work(&ioc->fault_reset_work)) - flush_workqueue(wq); - destroy_workqueue(wq); - + mpt2sas_base_stop_watchdog(ioc); mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); kfree(ioc->pfacts); kfree(ioc->ctl_cmds.reply); kfree(ioc->base_cmds.reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 286c185fa9e..acdcff150a3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,10 +69,10 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "01.100.03.00" +#define MPT2SAS_DRIVER_VERSION "01.100.04.00" #define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 -#define MPT2SAS_BUILD_VERSION 03 +#define MPT2SAS_BUILD_VERSION 04 #define MPT2SAS_RELEASE_VERSION 00 /* @@ -673,6 +673,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, /* base shared API */ extern struct list_head mpt2sas_ioc_list; +void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc); +void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc); int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc); void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc); diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 58cfb97846f..6ddee161beb 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t Mpi2ConfigRequest_t *config_request; int r; u8 retry_count; - u8 issue_reset; + u8 issue_host_reset = 0; u16 wait_state_count; + mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n", ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); return -EAGAIN; } retry_count = 0; retry_config: + if (retry_count) { + if (retry_count > 2) /* attempt only 2 retries */ + return -EFAULT; + printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } wait_state_count = 0; ioc_state = mpt2sas_base_get_iocstate(ioc, 1); while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { @@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t printk(MPT2SAS_ERR_FMT "%s: failed due to ioc not operational\n", ioc->name, __func__); - ioc->config_cmds.status = MPT2_CMD_NOT_USED; - return -EFAULT; + r = -EFAULT; + goto out; } ssleep(1); ioc_state = mpt2sas_base_get_iocstate(ioc, 1); @@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); - ioc->config_cmds.status = MPT2_CMD_NOT_USED; - return -EAGAIN; + r = -EAGAIN; + goto out; } r = 0; @@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2ConfigRequest_t)/4); - if (!(ioc->config_cmds.status & MPT2_CMD_RESET)) - issue_reset = 1; - goto issue_host_reset; + retry_count++; + if (ioc->config_cmds.smid == smid) + mpt2sas_base_free_smid(ioc, smid); + if ((ioc->shost_recovery) || + (ioc->config_cmds.status & MPT2_CMD_RESET)) + goto retry_config; + issue_host_reset = 1; + r = -EFAULT; + goto out; } if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) 
memcpy(mpi_reply, ioc->config_cmds.reply, @@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (retry_count) printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", ioc->name, __func__); +out: ioc->config_cmds.status = MPT2_CMD_NOT_USED; - return r; - - issue_host_reset: - if (issue_reset) + mutex_unlock(&ioc->config_cmds.mutex); + if (issue_host_reset) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); - ioc->config_cmds.status = MPT2_CMD_NOT_USED; - if (!retry_count) { - printk(MPT2SAS_INFO_FMT "%s: attempting retry\n", - ioc->name, __func__); - retry_count++; - goto retry_config; - } - return -EFAULT; + return r; } /** @@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2BiosPage2_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2BiosPage3_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, 
&mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2IOCPage8_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t config_page; - mutex_lock(&ioc->config_cmds.mutex); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t Mpi2ConfigRequest_t mpi_request; int r; struct config_request mem; - - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sz); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sz); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1206,7 +1186,6 @@ 
mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, struct config_request mem; u16 ioc_status; - mutex_lock(&ioc->config_cmds.mutex); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); *num_pds = 0; mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); memset(config_page, 0, sz); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1687,7 
+1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t int r; struct config_request mem; - mutex_lock(&ioc->config_cmds.mutex); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } @@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, struct config_request mem; u16 ioc_status; - mutex_lock(&ioc->config_cmds.mutex); *volume_handle = 0; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, _config_free_config_dma_memory(ioc, &mem); out: - mutex_unlock(&ioc->config_cmds.mutex); return r; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 2a01a5f2a84..2e9a4445596 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2767,6 +2767,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + + if (log_info == 0x31170000) + return; switch (ioc_status) { case MPI2_IOCSTATUS_SUCCESS: @@ -3426,7 +3430,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) __le64 sas_address; int i; unsigned long flags; - struct _sas_port *mpt2sas_port; + struct _sas_port *mpt2sas_port = NULL; int rc = 0; if (!handle) @@ -3518,12 +3522,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) &expander_pg1, i, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - continue; + rc = -1; + goto out_fail; } sas_expander->phy[i].handle = handle; sas_expander->phy[i].phy_id = i; - mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], - expander_pg1, sas_expander->parent_dev); + + if ((mpt2sas_transport_add_expander_phy(ioc, + &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } } if (sas_expander->enclosure_handle) { @@ -3540,8 +3552,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) out_fail: - if (sas_expander) - kfree(sas_expander->phy); + if (mpt2sas_port) + mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->parent_handle); kfree(sas_expander); return rc; } @@ -3663,12 +3676,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) sas_device->hidden_raid_component = is_pd; /* get enclosure_logical_id */ - if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, - MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, - sas_device->enclosure_handle))) { + if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0( + ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + sas_device->enclosure_handle))) sas_device->enclosure_logical_id = 
le64_to_cpu(enclosure_pg0.EnclosureLogicalID); - } /* get device name */ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); @@ -4250,12 +4262,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc, u16 handle = le16_to_cpu(element->VolDevHandle); int rc; -#if 0 /* RAID_HACKS */ - if (le32_to_cpu(event_data->Flags) & - MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) - return; -#endif - mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { printk(MPT2SAS_ERR_FMT @@ -4310,12 +4316,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, unsigned long flags; struct MPT2SAS_TARGET *sas_target_priv_data; -#if 0 /* RAID_HACKS */ - if (le32_to_cpu(event_data->Flags) & - MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) - return; -#endif - spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); @@ -4428,14 +4428,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device; unsigned long flags; u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (sas_device) + if (sas_device) { sas_device->hidden_raid_component = 1; - else - _scsih_add_device(ioc, handle, 0, 1); + return; + } + + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + _scsih_link_change(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_add_device(ioc, handle, 0, 1); } #ifdef CONFIG_SCSI_MPT2SAS_LOGGING @@ -4535,12 +4559,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, { Mpi2EventIrConfigElement_t *element; int i; + u8 foreign_config; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_ir_config_change_event_debug(ioc, event_data); #endif + foreign_config = (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { @@ -4548,11 +4575,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: case MPI2_EVENT_IR_CHANGE_RC_ADDED: - _scsih_sas_volume_add(ioc, element); + if (!foreign_config) + _scsih_sas_volume_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: case MPI2_EVENT_IR_CHANGE_RC_REMOVED: - _scsih_sas_volume_delete(ioc, element); + if (!foreign_config) + _scsih_sas_volume_delete(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: _scsih_sas_pd_hide(ioc, element); @@ -4671,6 +4700,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 state; struct _sas_device *sas_device; unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) return; @@ -4687,22 +4719,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, spin_unlock_irqrestore(&ioc->sas_device_lock, flags); switch (state) { -#if 0 - case MPI2_RAID_PD_STATE_OFFLINE: - if (sas_device) - _scsih_remove_device(ioc, handle); - break; -#endif case MPI2_RAID_PD_STATE_ONLINE: case MPI2_RAID_PD_STATE_DEGRADED: case MPI2_RAID_PD_STATE_REBUILDING: case MPI2_RAID_PD_STATE_OPTIMAL: - if (sas_device) + if (sas_device) { sas_device->hidden_raid_component = 1; - else - _scsih_add_device(ioc, handle, 0, 1); + return; + } + + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + _scsih_link_change(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_add_device(ioc, handle, 0, 1); + break; + case MPI2_RAID_PD_STATE_OFFLINE: case MPI2_RAID_PD_STATE_NOT_CONFIGURED: case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: case MPI2_RAID_PD_STATE_HOT_SPARE: @@ -5774,6 +5824,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); u32 device_state; + mpt2sas_base_stop_watchdog(ioc); flush_scheduled_work(); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); @@ -5816,6 +5867,7 @@ _scsih_resume(struct pci_dev *pdev) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); scsi_unblock_requests(shost); + mpt2sas_base_start_watchdog(ioc); return 0; } #endif /* CONFIG_PM */ diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 650bcef08f2..cd78c501803 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -9,7 +9,6 @@ #include <linux/moduleparam.h> #include <linux/vmalloc.h> -#include <linux/smp_lock.h> #include <linux/list.h> #include <scsi/scsi_tcq.h> diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index fcc184cd066..cbceb0ebabf 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c @@ -15,19 +15,18 @@ void qla4xxx_dump_buffer(void *b, uint32_t size) uint32_t cnt; uint8_t *c = b; - 
printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " + printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " "Fh\n"); printk("------------------------------------------------------------" "--\n"); - for (cnt = 0; cnt < size; cnt++, c++) { - printk(KERN_DEBUG "%02x", *c); - if (!(cnt % 16)) - printk(KERN_DEBUG "\n"); + for (cnt = 0; cnt < size; c++) { + printk(KERN_INFO "%02x", *c); + if (!(++cnt % 16)) + printk(KERN_INFO "\n"); else - printk(KERN_DEBUG " "); + printk(KERN_INFO " "); } - if (cnt % 16) - printk(KERN_DEBUG "\n"); + printk(KERN_INFO "\n"); } diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index b586f27c3bd..81b5f29254e 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -100,7 +100,6 @@ #define MAX_SRBS MAX_CMDS_TO_RISC #define MBOX_AEN_REG_COUNT 5 #define MAX_INIT_RETRIES 5 -#define IOCB_HIWAT_CUSHION 16 /* * Buffer sizes @@ -184,6 +183,11 @@ struct srb { uint16_t cc_stat; u_long r_start; /* Time we recieve a cmd from OS */ u_long u_start; /* Time when we handed the cmd to F/W */ + + /* Used for extended sense / status continuation */ + uint8_t *req_sense_ptr; + uint16_t req_sense_len; + uint16_t reserved2; }; /* @@ -302,7 +306,6 @@ struct scsi_qla_host { uint32_t tot_ddbs; uint16_t iocb_cnt; - uint16_t iocb_hiwat; /* SRB cache. */ #define SRB_MIN_REQ 128 @@ -436,6 +439,8 @@ struct scsi_qla_host { /* Map ddb_list entry by FW ddb index */ struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; + /* Saved srb for status continuation entry processing */ + struct srb *status_srb; }; static inline int is_qla4010(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 1b667a70cff..9cd7a608df3 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -572,6 +572,7 @@ struct conn_event_log_entry { *************************************************************************/ #define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */ #define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ +#define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */ /* IOCB header structure */ struct qla4_header { @@ -733,6 +734,12 @@ struct status_entry { }; +/* Status Continuation entry */ +struct status_cont_entry { + struct qla4_header hdr; /* 00-03 */ + uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */ +}; + struct passthru0 { struct qla4_header hdr; /* 00-03 */ uint32_t handle; /* 04-07 */ diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 912a67494ad..e0c32159749 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -10,9 +10,42 @@ #include "ql4_dbg.h" #include "ql4_inline.h" - #include <scsi/scsi_tcq.h> +static int +qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) +{ + uint16_t cnt; + + /* Calculate number of free request entries. */ + if ((req_cnt + 2) >= ha->req_q_count) { + cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); + if (ha->request_in < cnt) + ha->req_q_count = cnt - ha->request_in; + else + ha->req_q_count = REQUEST_QUEUE_DEPTH - + (ha->request_in - cnt); + } + + /* Check if room for request in request ring. 
*/ + if ((req_cnt + 2) < ha->req_q_count) + return 1; + else + return 0; +} + +static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha) +{ + /* Advance request queue pointer */ + if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { + ha->request_in = 0; + ha->request_ptr = ha->request_ring; + } else { + ha->request_in++; + ha->request_ptr++; + } +} + /** * qla4xxx_get_req_pkt - returns a valid entry in request queue. * @ha: Pointer to host adapter structure. @@ -26,35 +59,18 @@ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, struct queue_entry **queue_entry) { - uint16_t request_in; - uint8_t status = QLA_SUCCESS; - - *queue_entry = ha->request_ptr; + uint16_t req_cnt = 1; - /* get the latest request_in and request_out index */ - request_in = ha->request_in; - ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); - - /* Advance request queue pointer and check for queue full */ - if (request_in == (REQUEST_QUEUE_DEPTH - 1)) { - request_in = 0; - ha->request_ptr = ha->request_ring; - } else { - request_in++; - ha->request_ptr++; - } - - /* request queue is full, try again later */ - if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) { - /* restore request pointer */ - ha->request_ptr = *queue_entry; - status = QLA_ERROR; - } else { - ha->request_in = request_in; + if (qla4xxx_space_in_req_ring(ha, req_cnt)) { + *queue_entry = ha->request_ptr; memset(*queue_entry, 0, sizeof(**queue_entry)); + + qla4xxx_advance_req_ring_ptr(ha); + ha->req_q_count -= req_cnt; + return QLA_SUCCESS; } - return status; + return QLA_ERROR; } /** @@ -100,21 +116,14 @@ exit_send_marker: return status; } -static struct continuation_t1_entry* qla4xxx_alloc_cont_entry( - struct scsi_qla_host *ha) +static struct continuation_t1_entry * +qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha) { struct continuation_t1_entry *cont_entry; cont_entry = (struct continuation_t1_entry *)ha->request_ptr; - /* Advance request queue pointer */ - if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { - ha->request_in = 0; - ha->request_ptr = ha->request_ring; - } else { - ha->request_in++; - ha->request_ptr++; - } + qla4xxx_advance_req_ring_ptr(ha); /* Load packet defaults */ cont_entry->hdr.entryType = ET_CONTINUE; @@ -197,13 +206,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) struct scsi_cmnd *cmd = srb->cmd; struct ddb_entry *ddb_entry; struct command_t3_entry *cmd_entry; - int nseg; uint16_t tot_dsds; uint16_t req_cnt; - unsigned long flags; - uint16_t cnt; uint32_t index; char tag[2]; @@ -217,6 +223,19 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) index = (uint32_t)cmd->request->tag; + /* + * Check to see if adapter is online before placing request on + * request queue. If a reset occurs and a request is in the queue, + * the firmware will still attempt to process the request, retrieving + * garbage for pointers. + */ + if (!test_bit(AF_ONLINE, &ha->flags)) { + DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " + "Do not issue command.\n", + ha->host_no, __func__)); + goto queuing_error; + } + /* Calculate the number of request entries needed. 
*/ nseg = scsi_dma_map(cmd); if (nseg < 0) @@ -224,17 +243,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) tot_dsds = nseg; req_cnt = qla4xxx_calc_request_entries(tot_dsds); - - if (ha->req_q_count < (req_cnt + 2)) { - cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); - if (ha->request_in < cnt) - ha->req_q_count = cnt - ha->request_in; - else - ha->req_q_count = REQUEST_QUEUE_DEPTH - - (ha->request_in - cnt); - } - - if (ha->req_q_count < (req_cnt + 2)) + if (!qla4xxx_space_in_req_ring(ha, req_cnt)) goto queuing_error; /* total iocbs active */ @@ -286,32 +295,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) break; } - - /* Advance request queue pointer */ - ha->request_in++; - if (ha->request_in == REQUEST_QUEUE_DEPTH) { - ha->request_in = 0; - ha->request_ptr = ha->request_ring; - } else - ha->request_ptr++; - - + qla4xxx_advance_req_ring_ptr(ha); qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); wmb(); - /* - * Check to see if adapter is online before placing request on - * request queue. If a reset occurs and a request is in the queue, - * the firmware will still attempt to process the request, retrieving - * garbage for pointers. - */ - if (!test_bit(AF_ONLINE, &ha->flags)) { - DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " - "Do not issue command.\n", - ha->host_no, __func__)); - goto queuing_error; - } - srb->cmd->host_scribble = (unsigned char *)srb; /* update counters */ diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 799120fcb9b..8025ee16588 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -11,6 +11,98 @@ #include "ql4_inline.h" /** + * qla4xxx_copy_sense - copy sense data into cmd sense buffer + * @ha: Pointer to host adapter structure. + * @sts_entry: Pointer to status entry structure. + * @srb: Pointer to srb structure. + **/ +static void qla4xxx_copy_sense(struct scsi_qla_host *ha, + struct status_entry *sts_entry, + struct srb *srb) +{ + struct scsi_cmnd *cmd = srb->cmd; + uint16_t sense_len; + + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); + if (sense_len == 0) + return; + + /* Save total available sense length, + * not to exceed cmd's sense buffer size */ + sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); + srb->req_sense_ptr = cmd->sense_buffer; + srb->req_sense_len = sense_len; + + /* Copy sense from sts_entry pkt */ + sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); + memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); + + DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, " + "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, + cmd->device->channel, cmd->device->id, + cmd->device->lun, __func__, + sts_entry->senseData[2] & 0x0f, + sts_entry->senseData[7], + sts_entry->senseData[12], + sts_entry->senseData[13])); + + DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len)); + srb->flags |= SRB_GOT_SENSE; + + /* Update srb, in case a sts_cont pkt follows */ + srb->req_sense_ptr += sense_len; + srb->req_sense_len -= sense_len; + if (srb->req_sense_len != 0) + ha->status_srb = srb; + else + ha->status_srb = NULL; +} + +/** + * qla4xxx_status_cont_entry - Process a Status Continuations entry. + * @ha: SCSI driver HA context + * @sts_cont: Entry pointer + * + * Extended sense data. 
+ */ +static void +qla4xxx_status_cont_entry(struct scsi_qla_host *ha, + struct status_cont_entry *sts_cont) +{ + struct srb *srb = ha->status_srb; + struct scsi_cmnd *cmd; + uint8_t sense_len; + + if (srb == NULL) + return; + + cmd = srb->cmd; + if (cmd == NULL) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned " + "back to OS srb=%p srb->state:%d\n", ha->host_no, + __func__, srb, srb->state)); + ha->status_srb = NULL; + return; + } + + /* Copy sense data. */ + sense_len = min_t(uint16_t, srb->req_sense_len, + IOCB_MAX_EXT_SENSEDATA_LEN); + memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len); + DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len)); + + srb->req_sense_ptr += sense_len; + srb->req_sense_len -= sense_len; + + /* Place command on done queue. */ + if (srb->req_sense_len == 0) { + qla4xxx_srb_compl(ha, srb); + ha->status_srb = NULL; + } +} + +/** * qla4xxx_status_entry - processes status IOCBs * @ha: Pointer to host adapter structure. * @sts_entry: Pointer to status entry structure. @@ -23,7 +115,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, struct srb *srb; struct ddb_entry *ddb_entry; uint32_t residual; - uint16_t sensebytecnt; srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); if (!srb) { @@ -92,24 +183,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, break; /* Copy Sense Data into sense buffer. */ - memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - - sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt); - if (sensebytecnt == 0) - break; - - memcpy(cmd->sense_buffer, sts_entry->senseData, - min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE)); - - DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " - "ASC/ASCQ = %02x/%02x\n", ha->host_no, - cmd->device->channel, cmd->device->id, - cmd->device->lun, __func__, - sts_entry->senseData[2] & 0x0f, - sts_entry->senseData[12], - sts_entry->senseData[13])); - - srb->flags |= SRB_GOT_SENSE; + qla4xxx_copy_sense(ha, sts_entry, srb); break; case SCS_INCOMPLETE: @@ -176,23 +250,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, break; /* Copy Sense Data into sense buffer. */ - memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - - sensebytecnt = - le16_to_cpu(sts_entry->senseDataByteCnt); - if (sensebytecnt == 0) - break; - - memcpy(cmd->sense_buffer, sts_entry->senseData, - min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE)); - - DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " - "ASC/ASCQ = %02x/%02x\n", ha->host_no, - cmd->device->channel, cmd->device->id, - cmd->device->lun, __func__, - sts_entry->senseData[2] & 0x0f, - sts_entry->senseData[12], - sts_entry->senseData[13])); + qla4xxx_copy_sense(ha, sts_entry, srb); } else { /* * If RISC reports underrun and target does not @@ -268,9 +326,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, status_entry_exit: - /* complete the request */ + /* complete the request, if not waiting for status_continuation pkt */ srb->cc_stat = sts_entry->completionStatus; - qla4xxx_srb_compl(ha, srb); + if (ha->status_srb == NULL) + qla4xxx_srb_compl(ha, srb); } /** @@ -305,10 +364,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) /* process entry */ switch (sts_entry->hdr.entryType) { case ET_STATUS: - /* - * Common status - Single completion posted in single - * IOSB. 
- */ + /* Common status */ qla4xxx_status_entry(ha, sts_entry); break; @@ -316,9 +372,8 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) break; case ET_STATUS_CONTINUATION: - /* Just throw away the status continuation entries */ - DEBUG2(printk("scsi%ld: %s: Status Continuation entry " - "- ignoring\n", ha->host_no, __func__)); + qla4xxx_status_cont_entry(ha, + (struct status_cont_entry *) sts_entry); break; case ET_COMMAND: diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 051b0f5e8c8..09d6d4b76f3 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -385,16 +385,6 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha) mbox_sts[0])); return QLA_ERROR; } - - /* High-water mark of IOCBs */ - ha->iocb_hiwat = mbox_sts[2]; - if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION) - ha->iocb_hiwat -= IOCB_HIWAT_CUSHION; - else - dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d " - "firmware IOCBs available (%d).\n", - IOCB_HIWAT_CUSHION, ha->iocb_hiwat); - return QLA_SUCCESS; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ec9da6ce848..40e3cafb3a9 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -66,6 +66,7 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); +static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); /* * SCSI host template entry points @@ -89,6 +90,7 @@ static struct scsi_host_template qla4xxx_driver_template = { .eh_device_reset_handler = qla4xxx_eh_device_reset, .eh_target_reset_handler = qla4xxx_eh_target_reset, .eh_host_reset_handler = qla4xxx_eh_host_reset, + .eh_timed_out = qla4xxx_eh_cmd_timed_out, .slave_configure = qla4xxx_slave_configure, .slave_alloc = qla4xxx_slave_alloc, @@ -124,6 +126,21 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { static struct scsi_transport_template *qla4xxx_scsi_transport; +static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *session; + struct ddb_entry *ddb_entry; + + session = starget_to_session(scsi_target(sc->device)); + ddb_entry = session->dd_data; + + /* if we are not logged in then the LLD is going to clean up the cmd */ + if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) + return BLK_EH_RESET_TIMER; + else + return BLK_EH_NOT_HANDLED; +} + static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) { struct ddb_entry *ddb_entry = session->dd_data; @@ -904,18 +921,17 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, /* Flush any pending ddb changed AENs */ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + qla4xxx_flush_active_srbs(ha); + /* Reset the firmware. If successful, function * returns with ISP interrupts enabled. 
*/ - if (status == QLA_SUCCESS) { - DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", - ha->host_no, __func__)); - qla4xxx_flush_active_srbs(ha); - if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) - status = qla4xxx_soft_reset(ha); - else - status = QLA_ERROR; - } + DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", + ha->host_no, __func__)); + if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) + status = qla4xxx_soft_reset(ha); + else + status = QLA_ERROR; /* Flush any pending ddb changed AENs */ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); @@ -1527,11 +1543,9 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; - struct srb *sp; int ret = FAILED, stat; - sp = (struct srb *) cmd->SCp.ptr; - if (!sp || !ddb_entry) + if (!ddb_entry) return ret; dev_info(&ha->pdev->dev, @@ -1644,7 +1658,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) ha = (struct scsi_qla_host *) cmd->device->host->hostdata; dev_info(&ha->pdev->dev, - "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, + "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun); if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index ab984cb89ce..6980cb279c8 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,5 +5,5 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.01.00-k8" +#define QLA4XXX_DRIVER_VERSION "5.01.00-k9" diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2eee9e6e4fe..292c02f810d 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3670,13 +3670,14 @@ static void fc_bsg_goose_queue(struct fc_rport *rport) { int flagset; + unsigned long flags; if (!rport->rqst_q) return; get_device(&rport->dev); - spin_lock(rport->rqst_q->queue_lock); + spin_lock_irqsave(rport->rqst_q->queue_lock, flags); flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); if (flagset) @@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) __blk_run_queue(rport->rqst_q); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); - spin_unlock(rport->rqst_q->queue_lock); + spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); put_device(&rport->dev); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 783e33c65eb..b47240ca4b1 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -990,7 +990,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, struct iscsi_uevent *ev; int len = NLMSG_SPACE(sizeof(*ev) + data_size); - skb = alloc_skb(len, GFP_NOIO); + skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); return -ENOMEM; @@ -1012,7 +1012,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, memcpy((char *)ev + sizeof(*ev), data, data_size); - return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 5616cd780ff..b7b9fec67a9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1840,6 +1840,18 @@ static void 
sd_read_block_characteristics(struct scsi_disk *sdkp) kfree(buffer); } +static int sd_try_extended_inquiry(struct scsi_device *sdp) +{ + /* + * Although VPD inquiries can go to SCSI-2 type devices, + * some USB ones crash on receiving them, and the pages + * we currently ask for are for SPC-3 and beyond + */ + if (sdp->scsi_level > SCSI_SPC_2) + return 1; + return 0; +} + /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -1877,8 +1889,12 @@ static int sd_revalidate_disk(struct gendisk *disk) */ if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); - sd_read_block_limits(sdkp); - sd_read_block_characteristics(sdkp); + + if (sd_try_extended_inquiry(sdp)) { + sd_read_block_limits(sdkp); + sd_read_block_characteristics(sdkp); + } + sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8201387b4da..9230402c45a 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp); static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = (struct sg_fd *)filp->private_data; - struct request_queue *q = sfp->parentdp->device->request_queue; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; - return blk_verify_command(&q->cmd_filter, - cmd, filp->f_mode & FMODE_WRITE); + return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } static int @@ -621,7 +619,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) if (strcmp(current->comm, cmd) && printk_ratelimit()) { printk(KERN_WARNING "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" - "guessing data in;\n" KERN_WARNING " " + "guessing data in;\n " "program %s not setting count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], @@ -1658,6 +1656,10 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 
0 : 1; + if (dxfer_dir == SG_DXFER_TO_FROM_DEV) + md->from_user = 1; + else + md->from_user = 0; } if (iov_count) { diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index bcaba86060a..75da6e58ce5 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -2860,8 +2860,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) */ local_irq_restore(flags); - printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" - KERN_INFO " before abortion\n", HOSTNO); + printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); return SCSI_ABORT_NOT_RUNNING; } diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 97f3158fa7b..27e84e4b1fa 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c @@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev) host = ncr_attach(&zalon7xx_template, unit, &device); if (!host) - goto fail; + return -ENODEV; if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index a07015d646d..e7108e75653 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -60,11 +60,12 @@ struct serial_private { static void moan_device(const char *str, struct pci_dev *dev) { - printk(KERN_WARNING "%s: %s\n" - KERN_WARNING "Please send the output of lspci -vv, this\n" - KERN_WARNING "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" - KERN_WARNING "manufacturer and name of serial board or\n" - KERN_WARNING "modem board to rmk+serial@arm.linux.org.uk.\n", + printk(KERN_WARNING + "%s: %s\n" + "Please send the output of lspci -vv, this\n" + "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" + "manufacturer and name of serial board or\n" + "modem board to rmk+serial@arm.linux.org.uk.\n", pci_name(dev), str, dev->vendor, dev->device, dev->subsystem_vendor, dev->subsystem_device); } @@ -759,6 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev) /* subdevice 0x00PS means <P> parallel, <S> serial */ unsigned int num_serial = dev->subsystem_device & 0xf; + if (dev->device == PCI_DEVICE_ID_NETMOS_9901) + return 0; if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return 0; @@ -3557,6 +3560,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_VENDOR_ID_IBM, 0x0299, 0, 0, pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 037c1e0b7c4..6553833c12d 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -527,7 +527,7 @@ config SERIAL_S3C24A0 config SERIAL_S3C6400 tristate "Samsung S3C6400/S3C6410 Serial port support" - depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410) + depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410) default y help Serial port support for the Samsung S3C6400 and S3C6410 diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 338b15c0a54..607d43a3104 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -1551,6 +1551,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) if (ret) goto err_add_port; +#ifdef CONFIG_SERIAL_ATMEL_CONSOLE if (atmel_is_console_port(&port->uart) && ATMEL_CONSOLE_DEVICE->flags & 
CON_ENABLED) { /* @@ -1559,6 +1560,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) */ clk_disable(port->clk); } +#endif device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, port); diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index 34b4ae0fe76..c108b1a0ce9 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c @@ -236,7 +236,6 @@ static int sport_startup(struct uart_port *port) int retval; pr_debug("%s enter\n", __func__); - memset(buffer, 20, '\0'); snprintf(buffer, 20, "%s rx", up->name); retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); if (retval) { diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c index 141c0a3333a..a9802e76b5f 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c @@ -132,7 +132,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); if (is_con) { - mem_addr = alloc_bootmem(memsz); + mem_addr = kzalloc(memsz, GFP_NOWAIT); dma_addr = virt_to_bus(mem_addr); } else diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c index 698048f64f5..f7c24baa141 100644 --- a/drivers/serial/msm_serial.c +++ b/drivers/serial/msm_serial.c @@ -730,7 +730,6 @@ static int __devexit msm_serial_remove(struct platform_device *pdev) } static struct platform_driver msm_platform_driver = { - .probe = msm_serial_probe, .remove = msm_serial_remove, .driver = { .name = "msm_serial", diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c index fb00ed5296e..fed1a9a1ffb 100644 --- a/drivers/serial/s3c2400.c +++ b/drivers/serial/s3c2400.c @@ -76,7 +76,7 @@ static int s3c2400_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c2400_uart_inf); } -static struct platform_driver s3c2400_serial_drv = { +static struct platform_driver s3c2400_serial_driver = { .probe = s3c2400_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -85,16 +85,16 @@ static struct platform_driver s3c2400_serial_drv = { }, }; -s3c24xx_console_init(&s3c2400_serial_drv, &s3c2400_uart_inf); +s3c24xx_console_init(&s3c2400_serial_driver, &s3c2400_uart_inf); static inline int s3c2400_serial_init(void) { - return s3c24xx_serial_init(&s3c2400_serial_drv, &s3c2400_uart_inf); + return s3c24xx_serial_init(&s3c2400_serial_driver, &s3c2400_uart_inf); } static inline void s3c2400_serial_exit(void) { - platform_driver_unregister(&s3c2400_serial_drv); + platform_driver_unregister(&s3c2400_serial_driver); } module_init(s3c2400_serial_init); diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c index b5d7cbcba2a..c99f0821cae 100644 --- a/drivers/serial/s3c2410.c +++ b/drivers/serial/s3c2410.c @@ -88,7 +88,7 @@ static int s3c2410_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); } -static struct platform_driver s3c2410_serial_drv = { +static struct platform_driver s3c2410_serial_driver = { .probe = s3c2410_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -97,16 +97,16 @@ static struct platform_driver s3c2410_serial_drv = { }, }; -s3c24xx_console_init(&s3c2410_serial_drv, &s3c2410_uart_inf); +s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf); static int __init s3c2410_serial_init(void) { - return 
s3c24xx_serial_init(&s3c2410_serial_drv, &s3c2410_uart_inf); + return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf); } static void __exit s3c2410_serial_exit(void) { - platform_driver_unregister(&s3c2410_serial_drv); + platform_driver_unregister(&s3c2410_serial_driver); } module_init(s3c2410_serial_init); diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c index 11dcb90bdfe..6e057d8809d 100644 --- a/drivers/serial/s3c2412.c +++ b/drivers/serial/s3c2412.c @@ -121,7 +121,7 @@ static int s3c2412_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c2412_uart_inf); } -static struct platform_driver s3c2412_serial_drv = { +static struct platform_driver s3c2412_serial_driver = { .probe = s3c2412_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -130,16 +130,16 @@ static struct platform_driver s3c2412_serial_drv = { }, }; -s3c24xx_console_init(&s3c2412_serial_drv, &s3c2412_uart_inf); +s3c24xx_console_init(&s3c2412_serial_driver, &s3c2412_uart_inf); static inline int s3c2412_serial_init(void) { - return s3c24xx_serial_init(&s3c2412_serial_drv, &s3c2412_uart_inf); + return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf); } static inline void s3c2412_serial_exit(void) { - platform_driver_unregister(&s3c2412_serial_drv); + platform_driver_unregister(&s3c2412_serial_driver); } module_init(s3c2412_serial_init); diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c index 06c5b0cc47a..69ff5d340f0 100644 --- a/drivers/serial/s3c2440.c +++ b/drivers/serial/s3c2440.c @@ -151,7 +151,7 @@ static int s3c2440_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c2440_uart_inf); } -static struct platform_driver s3c2440_serial_drv = { +static struct platform_driver s3c2440_serial_driver = { .probe = s3c2440_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -160,16 +160,16 @@ static struct platform_driver s3c2440_serial_drv = { }, }; -s3c24xx_console_init(&s3c2440_serial_drv, &s3c2440_uart_inf); +s3c24xx_console_init(&s3c2440_serial_driver, &s3c2440_uart_inf); static int __init s3c2440_serial_init(void) { - return s3c24xx_serial_init(&s3c2440_serial_drv, &s3c2440_uart_inf); + return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf); } static void __exit s3c2440_serial_exit(void) { - platform_driver_unregister(&s3c2440_serial_drv); + platform_driver_unregister(&s3c2440_serial_driver); } module_init(s3c2440_serial_init); diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c index 786a067d62a..26c49e18bdd 100644 --- a/drivers/serial/s3c24a0.c +++ b/drivers/serial/s3c24a0.c @@ -92,7 +92,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf); } -static struct platform_driver s3c24a0_serial_drv = { +static struct platform_driver s3c24a0_serial_driver = { .probe = s3c24a0_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -101,16 +101,16 @@ static struct platform_driver s3c24a0_serial_drv = { }, }; -s3c24xx_console_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); +s3c24xx_console_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf); static int __init s3c24a0_serial_init(void) { - return s3c24xx_serial_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); + return s3c24xx_serial_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf); } static void __exit s3c24a0_serial_exit(void) { - platform_driver_unregister(&s3c24a0_serial_drv); + 
platform_driver_unregister(&s3c24a0_serial_driver); } module_init(s3c24a0_serial_init); diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c index 48f1a3781f0..4be92ab5005 100644 --- a/drivers/serial/s3c6400.c +++ b/drivers/serial/s3c6400.c @@ -122,7 +122,7 @@ static int s3c6400_serial_probe(struct platform_device *dev) return s3c24xx_serial_probe(dev, &s3c6400_uart_inf); } -static struct platform_driver s3c6400_serial_drv = { +static struct platform_driver s3c6400_serial_driver = { .probe = s3c6400_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -131,16 +131,16 @@ static struct platform_driver s3c6400_serial_drv = { }, }; -s3c24xx_console_init(&s3c6400_serial_drv, &s3c6400_uart_inf); +s3c24xx_console_init(&s3c6400_serial_driver, &s3c6400_uart_inf); static int __init s3c6400_serial_init(void) { - return s3c24xx_serial_init(&s3c6400_serial_drv, &s3c6400_uart_inf); + return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf); } static void __exit s3c6400_serial_exit(void) { - platform_driver_unregister(&s3c6400_serial_drv); + platform_driver_unregister(&s3c6400_serial_driver); } module_init(s3c6400_serial_init); diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c index 998e89dc5aa..e0665630e4d 100644 --- a/drivers/serial/serial_ks8695.c +++ b/drivers/serial/serial_ks8695.c @@ -549,7 +549,7 @@ static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = { .mapbase = KS8695_UART_VA, .iotype = SERIAL_IO_MEM, .irq = KS8695_IRQ_UART_TX, - .uartclk = CLOCK_TICK_RATE * 16, + .uartclk = KS8695_CLOCK_RATE * 16, .fifosize = 16, .ops = &ks8695uart_pops, .flags = ASYNC_BOOT_AUTOCONF, diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 66f52674ca0..8e2feb56334 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -707,24 +707,25 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr) static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) { - unsigned short ssr_status, scr_status; + unsigned short ssr_status, scr_status, err_enabled; struct uart_port *port = ptr; irqreturn_t ret = IRQ_NONE; ssr_status = sci_in(port, SCxSR); scr_status = sci_in(port, SCSCR); + err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); /* Tx Interrupt */ - if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE)) + if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE)) ret = sci_tx_interrupt(irq, ptr); /* Rx Interrupt */ - if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE)) + if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE)) ret = sci_rx_interrupt(irq, ptr); /* Error Interrupt */ - if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE)) + if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) ret = sci_er_interrupt(irq, ptr); /* Break Interrupt */ - if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE)) + if ((ssr_status & SCxSR_BRK(port)) && err_enabled) ret = sci_br_interrupt(irq, ptr); return ret; diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c index 0573f3b5175..dac550e57c2 100644 --- a/drivers/serial/vr41xx_siu.c +++ b/drivers/serial/vr41xx_siu.c @@ -1,7 +1,7 @@ /* * Driver for NEC VR4100 series Serial Interface Unit. * - * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org> * * Based on drivers/serial/8250.c, by Russell King. 
* diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index eee4b6e0af2..9b80ad36dbb 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -59,6 +59,8 @@ /* per-register bitmasks: */ +#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE (2 << 3) +#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP (1 << 2) #define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) #define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) @@ -90,6 +92,7 @@ #define OMAP2_MCSPI_CHCTRL_EN (1 << 0) +#define OMAP2_MCSPI_WAKEUPENABLE_WKEN (1 << 0) /* We have 2 DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { @@ -269,7 +272,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (rx != NULL) { omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, element_count, 1, + data_type, element_count - 1, 1, OMAP_DMA_SYNC_ELEMENT, mcspi_dma->dma_rx_sync_dev, 1); @@ -300,6 +303,25 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (rx != NULL) { wait_for_completion(&mcspi_dma->dma_rx_completion); dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); + omap2_mcspi_set_enable(spi, 0); + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[element_count - 1] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[element_count - 1] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[element_count - 1] = w; + } else { + dev_err(&spi->dev, "DMA RX last word empty"); + count -= (word_len <= 8) ? 1 : + (word_len <= 16) ? 2 : + /* word_len <= 32 */ 4; + } + omap2_mcspi_set_enable(spi, 1); } return count; } @@ -873,8 +895,12 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, - /* (3 << 8) | (2 << 3) | */ - OMAP2_MCSPI_SYSCONFIG_AUTOIDLE); + OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | + OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | + OMAP2_MCSPI_SYSCONFIG_SMARTIDLE); + + mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, + OMAP2_MCSPI_WAKEUPENABLE_WKEN); omap2_mcspi_set_master_mode(master); diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index aa90ddb3706..8980a5640bd 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c @@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev) /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->bus_num = 2; /* "official" */ master->num_chipselect = 4; master->setup = uwire_setup; diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 2a5abc08e85..f1db395dd88 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work) struct spi_bitbang *bitbang = container_of(work, struct spi_bitbang, work); unsigned long flags; + int do_setup = -1; + int (*setup_transfer)(struct spi_device *, + struct spi_transfer *); + + setup_transfer = bitbang->setup_transfer; spin_lock_irqsave(&bitbang->lock, flags); bitbang->busy = 1; @@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work) unsigned tmp; unsigned cs_change; int status; - int (*setup_transfer)(struct spi_device *, - struct spi_transfer *); m = container_of(bitbang->queue.next, struct spi_message, queue); @@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work) 
tmp = 0; cs_change = 1; status = 0; - setup_transfer = NULL; list_for_each_entry (t, &m->transfers, transfer_list) { - /* override or restore speed and wordsize */ - if (t->speed_hz || t->bits_per_word) { - setup_transfer = bitbang->setup_transfer; + /* override speed or wordsize? */ + if (t->speed_hz || t->bits_per_word) + do_setup = 1; + + /* init (-1) or override (1) transfer params */ + if (do_setup != 0) { if (!setup_transfer) { status = -ENOPROTOOPT; break; } - } - if (setup_transfer) { status = setup_transfer(spi, t); if (status < 0) break; @@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work) m->status = status; m->complete(m->context); - /* restore speed and wordsize */ - if (setup_transfer) + /* restore speed and wordsize if it was overridden */ + if (do_setup == 1) setup_transfer(spi, NULL); + do_setup = 0; /* normally deactivate chipselect ... unless no error and * cs_change has hinted that the next message will probably diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index e0d44af4745..3f3119d760d 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -111,29 +111,32 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi, unsigned int bpw; unsigned int hz; unsigned int div; + unsigned long clk; bpw = t ? t->bits_per_word : spi->bits_per_word; hz = t ? t->speed_hz : spi->max_speed_hz; + if (!bpw) + bpw = 8; + + if (!hz) + hz = spi->max_speed_hz; + if (bpw != 8) { dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); return -EINVAL; } - div = clk_get_rate(hw->clk) / hz; - - /* is clk = pclk / (2 * (pre+1)), or is it - * clk = (pclk * 2) / ( pre + 1) */ - - div /= 2; - - if (div > 0) - div -= 1; + clk = clk_get_rate(hw->clk); + div = DIV_ROUND_UP(clk, hz * 2) - 1; if (div > 255) div = 255; - dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); + dev_dbg(&spi->dev, "setting pre-scaler to %d (wanted %d, got %ld)\n", + div, hz, clk / (2 * (div + 1))); + + writeb(div, hw->regs + S3C2410_SPPRE); spin_lock(&hw->bitbang.lock); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 5d869c4d3eb..606e7a40a8d 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; /* Bit masks for spi_device.mode management. Note that incorrect - * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other - * devices on a shared bus: CS_HIGH, because this device will be - * active when it shouldn't be; 3WIRE, because when active it won't - * behave as it should. + * settings for some settings can cause *lots* of trouble for other + * devices on a shared bus: * - * REVISIT should changing those two modes be privileged? + * - CS_HIGH ... this device will be active when it shouldn't be + * - 3WIRE ... when active, it won't behave as it should + * - NO_CS ... there will be no explicit message boundaries; this + * is completely incompatible with the shared bus model + * - READY ... transfers may proceed when they shouldn't. + * + * REVISIT should changing those flags be privileged? 
*/ #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP) + | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \ + | SPI_NO_CS | SPI_READY) struct spidev_data { dev_t devt; diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index 3fd3e3b412b..3c6feed46f6 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -49,29 +49,54 @@ static const u32 ipsflag_irq_shift[] = { static inline u32 ssb_irqflag(struct ssb_device *dev) { - return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; + u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG); + if (tpsflag) + return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; + else + /* not irq supported */ + return 0x3f; +} + +static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag) +{ + struct ssb_bus *bus = rdev->bus; + int i; + for (i = 0; i < bus->nr_devices; i++) { + struct ssb_device *dev; + dev = &(bus->devices[i]); + if (ssb_irqflag(dev) == irqflag) + return dev; + } + return NULL; } /* Get the MIPS IRQ assignment for a specified device. * If unassigned, 0 is returned. + * If disabled, 5 is returned. + * If not supported, 6 is returned. */ unsigned int ssb_mips_irq(struct ssb_device *dev) { struct ssb_bus *bus = dev->bus; + struct ssb_device *mdev = bus->mipscore.dev; u32 irqflag; u32 ipsflag; u32 tmp; unsigned int irq; irqflag = ssb_irqflag(dev); + if (irqflag == 0x3f) + return 6; ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG); for (irq = 1; irq <= 4; irq++) { tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]); if (tmp == irqflag) break; } - if (irq == 5) - irq = 0; + if (irq == 5) { + if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)) + irq = 0; + } return irq; } @@ -97,25 +122,56 @@ static void set_irq(struct ssb_device *dev, unsigned int irq) struct ssb_device *mdev = bus->mipscore.dev; u32 irqflag = ssb_irqflag(dev); + BUG_ON(oldirq == 6); + dev->irq = irq + 2; - ssb_dprintk(KERN_INFO PFX - "set_irq: core 0x%04x, irq %d => %d\n", - dev->id.coreid, oldirq, irq); /* clear the old irq */ if (oldirq == 0) ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); - else + else if (oldirq != 5) clear_irq(bus, oldirq); /* assign the new one */ if (irq == 0) { ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC))); } else { + u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG); + if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) { + u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]; + struct ssb_device *olddev = find_device(dev, oldipsflag); + if (olddev) + set_irq(olddev, 0); + } irqflag <<= ipsflag_irq_shift[irq]; - irqflag |= (ssb_read32(mdev, SSB_IPSFLAG) & ~ipsflag_irq_mask[irq]); + irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); ssb_write32(mdev, SSB_IPSFLAG, irqflag); } + ssb_dprintk(KERN_INFO PFX + "set_irq: core 0x%04x, irq %d => %d\n", + dev->id.coreid, oldirq+2, irq+2); +} + +static void print_irq(struct ssb_device *dev, unsigned int irq) +{ + int i; + static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; + ssb_dprintk(KERN_INFO PFX + "core 0x%04x, irq :", dev->id.coreid); + for (i = 0; i <= 6; i++) { + ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" "); + } + ssb_dprintk("\n"); +} + +static void dump_irq(struct ssb_bus *bus) +{ + int i; + for (i = 0; i < bus->nr_devices; i++) { + struct ssb_device *dev; + dev = &(bus->devices[i]); + print_irq(dev, ssb_mips_irq(dev)); + } } static void ssb_mips_serial_init(struct ssb_mipscore 
*mcore) @@ -197,16 +253,23 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ for (irq = 2, i = 0; i < bus->nr_devices; i++) { + int mips_irq; dev = &(bus->devices[i]); - dev->irq = ssb_mips_irq(dev) + 2; + mips_irq = ssb_mips_irq(dev); + if (mips_irq > 4) + dev->irq = 0; + else + dev->irq = mips_irq + 2; + if (dev->irq > 5) + continue; switch (dev->id.coreid) { case SSB_DEV_USB11_HOST: /* shouldn't need a separate irq line for non-4710, most of them have a proper * external usb controller on the pci */ if ((bus->chip_id == 0x4710) && (irq <= 4)) { set_irq(dev, irq++); - break; } + break; /* fallthrough */ case SSB_DEV_PCI: case SSB_DEV_ETHERNET: @@ -220,6 +283,8 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) } } } + ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n"); + dump_irq(bus); ssb_mips_serial_init(mcore); ssb_mips_flash_detect(mcore); diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index fbfadbac67e..100e7a5c5ea 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -583,7 +583,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom) ssb_printk("."); err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); if (err) { - ssb_printk("\n" KERN_NOTICE PFX + ssb_printk(KERN_NOTICE PFX "Failed to write to SPROM.\n"); failed = 1; break; @@ -591,7 +591,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom) } err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); if (err) { - ssb_printk("\n" KERN_NOTICE PFX + ssb_printk(KERN_NOTICE PFX "Could not disable SPROM write access.\n"); failed = 1; } @@ -678,7 +678,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, sprom->board_rev = tuple.TupleData[1]; break; case SSB_PCMCIA_CIS_PA: - GOTO_ERROR_ON(tuple.TupleDataLen != 9, + GOTO_ERROR_ON((tuple.TupleDataLen != 9) && + (tuple.TupleDataLen != 10), "pa tpl size"); sprom->pa0b0 = tuple.TupleData[1] | ((u16)tuple.TupleData[2] << 8); @@ -718,7 +719,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1]; break; case SSB_PCMCIA_CIS_BFLAGS: - GOTO_ERROR_ON(tuple.TupleDataLen != 3, + GOTO_ERROR_ON((tuple.TupleDataLen != 3) && + (tuple.TupleDataLen != 5), "bfl tpl size"); sprom->boardflags_lo = tuple.TupleData[1] | ((u16)tuple.TupleData[2] << 8); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 348bf61a8fe..975ecddbce3 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -103,8 +103,6 @@ source "drivers/staging/pohmelfs/Kconfig" source "drivers/staging/stlc45xx/Kconfig" -source "drivers/staging/uc2322/Kconfig" - source "drivers/staging/b3dfg/Kconfig" source "drivers/staging/phison/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 8d61d7b4deb..2241ae1b21e 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -34,7 +34,6 @@ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_DST) += dst/ obj-$(CONFIG_POHMELFS) += pohmelfs/ obj-$(CONFIG_STLC45XX) += stlc45xx/ -obj-$(CONFIG_USB_SERIAL_ATEN2011) += uc2322/ obj-$(CONFIG_B3DFG) += b3dfg/ obj-$(CONFIG_IDE_PHISON) += phison/ obj-$(CONFIG_PLAN9AUTH) += p9auth/ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index fe72240f5a9..f934393f395 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -96,19 +96,21 @@ static int lowmem_shrink(int nr_to_scan, 
gfp_t gfp_mask) read_lock(&tasklist_lock); for_each_process(p) { + struct mm_struct *mm; int oom_adj; task_lock(p); - if (!p->mm) { + mm = p->mm; + if (!mm) { task_unlock(p); continue; } - oom_adj = p->oomkilladj; + oom_adj = mm->oom_adj; if (oom_adj < min_adj) { task_unlock(p); continue; } - tasksize = get_mm_rss(p->mm); + tasksize = get_mm_rss(mm); task_unlock(p); if (tasksize <= 0) continue; diff --git a/drivers/staging/b3dfg/Kconfig b/drivers/staging/b3dfg/Kconfig index 524231047de..9e6573cf97d 100644 --- a/drivers/staging/b3dfg/Kconfig +++ b/drivers/staging/b3dfg/Kconfig @@ -1,5 +1,6 @@ config B3DFG tristate "Brontes 3d Frame Framegrabber" + depends on PCI default n ---help--- This driver provides support for the Brontes 3d Framegrabber diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index baf83c6a941..e3c3adc282e 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -45,6 +45,8 @@ Devices: [JR3] PCI force sensor board (jr3_pci) #include <linux/delay.h> #include <linux/ctype.h> #include <linux/firmware.h> +#include <linux/jiffies.h> +#include <linux/timer.h> #include "comedi_pci.h" #include "jr3_pci.h" diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 92121cf8c45..5d9bab352c1 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -111,9 +111,13 @@ static const struct s626_board s626_boards[] = { #define PCI_VENDOR_ID_S626 0x1131 #define PCI_DEVICE_ID_S626 0x7146 +/* + * For devices with vendor:device id == 0x1131:0x7146 you must specify + * also subvendor:subdevice ids, because otherwise it will conflict with + * Philips SAA7146 media/dvb based cards. + */ static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = { - {PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - 0}, + {PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, 0x6000, 0x0272, 0, 0, 0}, {0} }; @@ -499,25 +503,26 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it) resource_size_t resourceStart; dma_addr_t appdma; struct comedi_subdevice *s; - struct pci_dev *pdev; + const struct pci_device_id *ids; + struct pci_dev *pdev = NULL; if (alloc_private(dev, sizeof(struct s626_private)) < 0) return -ENOMEM; - for (pdev = pci_get_device(PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, - NULL); pdev != NULL; - pdev = pci_get_device(PCI_VENDOR_ID_S626, - PCI_DEVICE_ID_S626, pdev)) { - if (it->options[0] || it->options[1]) { - if (pdev->bus->number == it->options[0] && - PCI_SLOT(pdev->devfn) == it->options[1]) { + for (i = 0; i < (ARRAY_SIZE(s626_pci_table) - 1) && !pdev; i++) { + ids = &s626_pci_table[i]; + do { + pdev = pci_get_subsys(ids->vendor, ids->device, ids->subvendor, + ids->subdevice, pdev); + + if ((it->options[0] || it->options[1]) && pdev) { /* matches requested bus/slot */ + if (pdev->bus->number == it->options[0] && + PCI_SLOT(pdev->devfn) == it->options[1]) + break; + } else break; - } - } else { - /* no bus/slot specified */ - break; - } + } while (1); } devpriv->pdev = pdev; diff --git a/drivers/staging/go7007/s2250-loader.c b/drivers/staging/go7007/s2250-loader.c index a5e4acab089..bb22347af60 100644 --- a/drivers/staging/go7007/s2250-loader.c +++ b/drivers/staging/go7007/s2250-loader.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/smp_lock.h> #include <linux/usb.h> #include <dvb-usb.h> diff --git a/drivers/staging/heci/Kconfig b/drivers/staging/heci/Kconfig index 
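[Editor's note] The s626 comment above explains why the bare 0x1131:0x7146 match had to go: that vendor:device pair also belongs to Philips SAA7146 media/DVB cards, so only the subsystem IDs single out the Sensoray board, and the attach loop now walks the table with pci_get_subsys(). A self-contained sketch of a four-ID match of that kind; the s626 entry mirrors the one in the driver, PCI_ANY_ID is modelled as an all-ones word, and the second probe uses illustrative subsystem IDs for some other SAA7146-based card:

#include <stdio.h>

#define ANY_ID 0xffffffffu        /* stand-in for PCI_ANY_ID */

struct pci_match {
        unsigned int vendor, device, subvendor, subdevice;
};

static int id_matches(const struct pci_match *id,
                      unsigned int ven, unsigned int dev,
                      unsigned int sven, unsigned int sdev)
{
        return (id->vendor == ANY_ID || id->vendor == ven) &&
               (id->device == ANY_ID || id->device == dev) &&
               (id->subvendor == ANY_ID || id->subvendor == sven) &&
               (id->subdevice == ANY_ID || id->subdevice == sdev);
}

int main(void)
{
        /* Sensoray 626: same vendor:device as SAA7146, distinct subsystem. */
        const struct pci_match s626 = { 0x1131, 0x7146, 0x6000, 0x0272 };

        printf("s626 board:            %d\n",
               id_matches(&s626, 0x1131, 0x7146, 0x6000, 0x0272));
        printf("other SAA7146 card:    %d\n",
               id_matches(&s626, 0x1131, 0x7146, 0x1131, 0x0000));
        return 0;
}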
ae8d588d3a2..c7206f8bcd9 100644 --- a/drivers/staging/heci/Kconfig +++ b/drivers/staging/heci/Kconfig @@ -1,5 +1,6 @@ config HECI tristate "Intel Management Engine Interface (MEI) Support" + depends on PCI ---help--- The Intel Management Engine Interface (Intel MEI) driver allows applications to access the Active Management Technology diff --git a/drivers/staging/meilhaus/TODO b/drivers/staging/meilhaus/TODO index 6ec25203089..d6ce39823de 100644 --- a/drivers/staging/meilhaus/TODO +++ b/drivers/staging/meilhaus/TODO @@ -7,4 +7,4 @@ TODO: - possible comedi merge Please send cleanup patches to Greg Kroah-Hartman <greg@kroah.com> -and CC: David Kiliani <mail@davidkiliani.de> +and CC: David Kiliani <mail@davidkiliani.de> and Meilhaus Support <support@meilhaus.de> diff --git a/drivers/staging/rspiusb/rspiusb.c b/drivers/staging/rspiusb/rspiusb.c index 1cdfe69585e..04e2f92c0f6 100644 --- a/drivers/staging/rspiusb/rspiusb.c +++ b/drivers/staging/rspiusb/rspiusb.c @@ -444,8 +444,7 @@ static void piusb_write_bulk_callback(struct urb *urb) __func__, status); pdx->pendingWrite = 0; - usb_buffer_free(urb->dev, urb->transfer_buffer_length, - urb->transfer_buffer, urb->transfer_dma); + kfree(urb->transfer_buffer); } int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len, @@ -457,9 +456,7 @@ int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len, urb = usb_alloc_urb(0, GFP_KERNEL); if (urb != NULL) { - kbuf = - usb_buffer_alloc(pdx->udev, len, GFP_KERNEL, - &urb->transfer_dma); + kbuf = kmalloc(len, GFP_KERNEL); if (!kbuf) { dev_err(&pdx->udev->dev, "buffer_alloc failed\n"); return -ENOMEM; @@ -470,7 +467,6 @@ int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len, } usb_fill_bulk_urb(urb, pdx->udev, pdx->hEP[io->endpoint], kbuf, len, piusb_write_bulk_callback, pdx); - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; err = usb_submit_urb(urb, GFP_KERNEL); if (err) { dev_err(&pdx->udev->dev, @@ -641,7 +637,7 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx) numPagesRequired = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT; dbg("Number of pages needed = %d", numPagesRequired); - maplist_p = vmalloc(numPagesRequired * sizeof(struct page)); + maplist_p = vmalloc(numPagesRequired * sizeof(struct page *)); if (!maplist_p) { dbg("Can't Allocate Memory for maplist_p"); return -ENOMEM; @@ -712,9 +708,7 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx) usb_fill_bulk_urb(pdx->PixelUrb[frameInfo][i], pdx->udev, epAddr, - (dma_addr_t *) sg_dma_address(&pdx-> - sgl[frameInfo] - [i]), + NULL, // non-DMA HC? 
buy a better hardware sg_dma_len(&pdx->sgl[frameInfo][i]), piusb_readPIXEL_callback, (void *)pdx); pdx->PixelUrb[frameInfo][i]->transfer_dma = @@ -722,6 +716,8 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx) pdx->PixelUrb[frameInfo][i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; } + if (i == 0) + return -EINVAL; /* only interrupt when last URB completes */ pdx->PixelUrb[frameInfo][--i]->transfer_flags &= ~URB_NO_INTERRUPT; pdx->pendedPixelUrbs[frameInfo] = diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h index 85175c18243..25b53ac3f82 100644 --- a/drivers/staging/rt2860/rt_linux.h +++ b/drivers/staging/rt2860/rt_linux.h @@ -43,9 +43,6 @@ #include "rtmp_type.h" #include <linux/module.h> #include <linux/kernel.h> -#if !defined(RT2860) && !defined(RT30xx) -#include <linux/kthread.h> -#endif #include <linux/spinlock.h> #include <linux/init.h> @@ -166,9 +163,7 @@ typedef int (*HARD_START_XMIT_FUNC)(struct sk_buff *skb, struct net_device *net_ #ifndef RT30xx typedef struct pid * THREAD_PID; -#ifdef RT2860 #define THREAD_PID_INIT_VALUE NULL -#endif #define GET_PID(_v) find_get_pid(_v) #define GET_PID_NUMBER(_v) pid_nr(_v) #define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0) @@ -188,12 +183,12 @@ struct os_cookie { dma_addr_t pAd_pa; #endif #ifdef RT2870 - struct usb_device *pUsb_Dev; + struct usb_device *pUsb_Dev; #ifndef RT30xx - struct task_struct *MLMEThr_task; - struct task_struct *RTUSBCmdThr_task; - struct task_struct *TimerQThr_task; + THREAD_PID MLMEThr_pid; + THREAD_PID RTUSBCmdThr_pid; + THREAD_PID TimerQThr_pid; #endif #ifdef RT30xx struct pid *MLMEThr_pid; diff --git a/drivers/staging/rt2870/2870_main_dev.c b/drivers/staging/rt2870/2870_main_dev.c index dd01c64fbf6..a4e8696ca39 100644 --- a/drivers/staging/rt2870/2870_main_dev.c +++ b/drivers/staging/rt2870/2870_main_dev.c @@ -235,7 +235,7 @@ INT MlmeThread( DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); #ifndef RT30xx - pObj->MLMEThr_task = NULL; + pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; #endif #ifdef RT30xx pObj->MLMEThr_pid = NULL; @@ -348,7 +348,7 @@ INT RTUSBCmdThread( DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n")); #ifndef RT30xx - pObj->RTUSBCmdThr_task = NULL; + pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; #endif #ifdef RT30xx pObj->RTUSBCmdThr_pid = NULL; @@ -447,7 +447,7 @@ INT TimerQThread( DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); #ifndef RT30xx - pObj->TimerQThr_task = NULL; + pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE; #endif #ifdef RT30xx pObj->TimerQThr_pid = NULL; @@ -883,46 +883,69 @@ VOID RT28xxThreadTerminate( // Terminate Threads #ifndef RT30xx - BUG_ON(pObj->TimerQThr_task == NULL); - CHECK_PID_LEGALITY(task_pid(pObj->TimerQThr_task)) + CHECK_PID_LEGALITY(pObj->TimerQThr_pid) { POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie; - printk(KERN_DEBUG "Terminate the TimerQThr pid=%d!\n", - pid_nr(task_pid(pObj->TimerQThr_task))); + printk("Terminate the TimerQThr_pid=%d!\n", GET_PID_NUMBER(pObj->TimerQThr_pid)); mb(); pAd->TimerFunc_kill = 1; mb(); - kthread_stop(pObj->TimerQThr_task); - pObj->TimerQThr_task = NULL; + ret = KILL_THREAD_PID(pObj->TimerQThr_pid, SIGTERM, 1); + if (ret) + { + printk(KERN_WARNING "%s: unable to stop TimerQThread, pid=%d, ret=%d!\n", + pAd->net_dev->name, GET_PID_NUMBER(pObj->TimerQThr_pid), ret); + } + else + { + wait_for_completion(&pAd->TimerQComplete); + pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE; + } } - BUG_ON(pObj->MLMEThr_task == NULL); - 
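[Editor's note] One of the quieter rspiusb fixes above is in MapUserBuffer(): maplist_p stores page *pointers*, so its size is numPagesRequired * sizeof(struct page *); the old sizeof(struct page) form allocated far more memory than the pointer array needs. A tiny sketch of why the two sizes differ, using an invented stand-in struct rather than the kernel's struct page:

#include <stdio.h>
#include <stdlib.h>

struct page_like { unsigned long flags, counters, mapping, index, lru[2]; };

int main(void)
{
        size_t n = 16;            /* illustrative page count */

        /* An array of pointers costs one machine word per entry. */
        struct page_like **maplist = malloc(n * sizeof(struct page_like *));
        if (!maplist)
                return 1;

        printf("per entry: %zu bytes as a pointer, %zu bytes as a struct\n",
               sizeof(struct page_like *), sizeof(struct page_like));
        free(maplist);
        return 0;
}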
CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) + CHECK_PID_LEGALITY(pObj->MLMEThr_pid) { - printk(KERN_DEBUG "Terminate the MLMEThr pid=%d!\n", - pid_nr(task_pid(pObj->MLMEThr_task))); + printk("Terminate the MLMEThr_pid=%d!\n", GET_PID_NUMBER(pObj->MLMEThr_pid)); mb(); pAd->mlme_kill = 1; //RT28XX_MLME_HANDLER(pAd); mb(); - kthread_stop(pObj->MLMEThr_task); - pObj->MLMEThr_task = NULL; + ret = KILL_THREAD_PID(pObj->MLMEThr_pid, SIGTERM, 1); + if (ret) + { + printk (KERN_WARNING "%s: unable to Mlme thread, pid=%d, ret=%d!\n", + pAd->net_dev->name, GET_PID_NUMBER(pObj->MLMEThr_pid), ret); + } + else + { + //wait_for_completion (&pAd->notify); + wait_for_completion (&pAd->mlmeComplete); + pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; + } } - BUG_ON(pObj->RTUSBCmdThr_task == NULL); - CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) + CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) { - printk(KERN_DEBUG "Terminate the RTUSBCmdThr pid=%d!\n", - pid_nr(task_pid(pObj->RTUSBCmdThr_task))); + printk("Terminate the RTUSBCmdThr_pid=%d!\n", GET_PID_NUMBER(pObj->RTUSBCmdThr_pid)); mb(); NdisAcquireSpinLock(&pAd->CmdQLock); pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED; NdisReleaseSpinLock(&pAd->CmdQLock); mb(); //RTUSBCMDUp(pAd); - kthread_stop(pObj->RTUSBCmdThr_task); - pObj->RTUSBCmdThr_task = NULL; + ret = KILL_THREAD_PID(pObj->RTUSBCmdThr_pid, SIGTERM, 1); + if (ret) + { + printk(KERN_WARNING "%s: unable to RTUSBCmd thread, pid=%d, ret=%d!\n", + pAd->net_dev->name, GET_PID_NUMBER(pObj->RTUSBCmdThr_pid), ret); + } + else + { + //wait_for_completion (&pAd->notify); + wait_for_completion (&pAd->CmdQComplete); + pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; + } } #endif #ifdef RT30xx @@ -1045,7 +1068,7 @@ BOOLEAN RT28XXChipsetCheck( dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) { #ifndef RT30xx - printk(KERN_DEBUG "rt2870: idVendor = 0x%x, idProduct = 0x%x\n", + printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", #endif #ifdef RT30xx printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c index 0f4c8af97e4..80909e9ab5a 100644 --- a/drivers/staging/rt2870/common/2870_rtmp_init.c +++ b/drivers/staging/rt2870/common/2870_rtmp_init.c @@ -700,8 +700,8 @@ NDIS_STATUS AdapterBlockAllocateMemory( usb_dev = pObj->pUsb_Dev; #ifndef RT30xx - pObj->MLMEThr_task = NULL; - pObj->RTUSBCmdThr_task = NULL; + pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; + pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; #endif #ifdef RT30xx pObj->MLMEThr_pid = NULL; @@ -743,7 +743,7 @@ NDIS_STATUS CreateThreads( PRTMP_ADAPTER pAd = net_dev->ml_priv; POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; #ifndef RT30xx - struct task_struct *tsk; + pid_t pid_number = -1; #endif #ifdef RT30xx pid_t pid_number; @@ -762,10 +762,10 @@ NDIS_STATUS CreateThreads( // Creat MLME Thread #ifndef RT30xx - pObj->MLMEThr_task = NULL; - tsk = kthread_run(MlmeThread, pAd, "%s", pAd->net_dev->name); - - if (IS_ERR(tsk)) { + pObj->MLMEThr_pid= THREAD_PID_INIT_VALUE; + pid_number = kernel_thread(MlmeThread, pAd, CLONE_VM); + if (pid_number < 0) + { #endif #ifdef RT30xx pObj->MLMEThr_pid = NULL; @@ -778,7 +778,7 @@ NDIS_STATUS CreateThreads( } #ifndef RT30xx - pObj->MLMEThr_task = tsk; + pObj->MLMEThr_pid = GET_PID(pid_number); #endif #ifdef RT30xx pObj->MLMEThr_pid = find_get_pid(pid_number); @@ -788,10 +788,9 @@ NDIS_STATUS CreateThreads( // Creat Command Thread #ifndef RT30xx - pObj->RTUSBCmdThr_task = NULL; - tsk = kthread_run(RTUSBCmdThread, pAd, 
"%s", pAd->net_dev->name); - - if (IS_ERR(tsk) < 0) + pObj->RTUSBCmdThr_pid= THREAD_PID_INIT_VALUE; + pid_number = kernel_thread(RTUSBCmdThread, pAd, CLONE_VM); + if (pid_number < 0) #endif #ifdef RT30xx pObj->RTUSBCmdThr_pid = NULL; @@ -804,7 +803,7 @@ NDIS_STATUS CreateThreads( } #ifndef RT30xx - pObj->RTUSBCmdThr_task = tsk; + pObj->RTUSBCmdThr_pid = GET_PID(pid_number); #endif #ifdef RT30xx pObj->RTUSBCmdThr_pid = find_get_pid(pid_number); @@ -812,9 +811,9 @@ NDIS_STATUS CreateThreads( wait_for_completion(&(pAd->CmdQComplete)); #ifndef RT30xx - pObj->TimerQThr_task = NULL; - tsk = kthread_run(TimerQThread, pAd, "%s", pAd->net_dev->name); - if (IS_ERR(tsk) < 0) + pObj->TimerQThr_pid= THREAD_PID_INIT_VALUE; + pid_number = kernel_thread(TimerQThread, pAd, CLONE_VM); + if (pid_number < 0) #endif #ifdef RT30xx pObj->TimerQThr_pid = NULL; @@ -826,7 +825,7 @@ NDIS_STATUS CreateThreads( return NDIS_STATUS_FAILURE; } #ifndef RT30xx - pObj->TimerQThr_task = tsk; + pObj->TimerQThr_pid = GET_PID(pid_number); #endif #ifdef RT30xx pObj->TimerQThr_pid = find_get_pid(pid_number); diff --git a/drivers/staging/rt2870/common/rtusb_io.c b/drivers/staging/rt2870/common/rtusb_io.c index fd1b0c18f2a..704b5c2d509 100644 --- a/drivers/staging/rt2870/common/rtusb_io.c +++ b/drivers/staging/rt2870/common/rtusb_io.c @@ -984,8 +984,7 @@ NDIS_STATUS RTUSBEnqueueCmdFromNdis( POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; #ifndef RT30xx - BUG_ON(pObj->RTUSBCmdThr_task == NULL); - CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) + CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) #endif #ifdef RT30xx if (pObj->RTUSBCmdThr_pid < 0) diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h index 5e5b3f2b7eb..2b8872b2fd9 100644 --- a/drivers/staging/rt2870/rt2870.h +++ b/drivers/staging/rt2870/rt2870.h @@ -79,6 +79,7 @@ { \ {USB_DEVICE(0x148F,0x2770)}, /* Ralink */ \ {USB_DEVICE(0x1737,0x0071)}, /* Linksys WUSB600N */ \ + {USB_DEVICE(0x1737,0x0070)}, /* Linksys */ \ {USB_DEVICE(0x148F,0x2870)}, /* Ralink */ \ {USB_DEVICE(0x148F,0x3070)}, /* Ralink */ \ {USB_DEVICE(0x0B05,0x1731)}, /* Asus */ \ @@ -89,15 +90,18 @@ {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \ + {USB_DEVICE(0x0DF6,0x003F)}, /* Sitecom WL-608 */ \ {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ + {USB_DEVICE(0x2019,0xED14)}, /* Planex Communications, Inc. */ \ {USB_DEVICE(0x2019,0xAB25)}, /* Planex Communications, Inc. 
RT3070 */ \ {USB_DEVICE(0x07D1,0x3C09)}, /* D-Link */ \ {USB_DEVICE(0x07D1,0x3C11)}, /* D-Link */ \ {USB_DEVICE(0x14B2,0x3C07)}, /* AL */ \ {USB_DEVICE(0x14B2,0x3C12)}, /* AL */ \ {USB_DEVICE(0x050D,0x8053)}, /* Belkin */ \ + {USB_DEVICE(0x050D,0x815C)}, /* Belkin */ \ {USB_DEVICE(0x14B2,0x3C23)}, /* Airlink */ \ {USB_DEVICE(0x14B2,0x3C27)}, /* Airlink */ \ {USB_DEVICE(0x07AA,0x002F)}, /* Corega */ \ @@ -586,16 +590,14 @@ VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs); #define RTUSBMlmeUp(pAd) \ { \ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ - BUG_ON(pObj->MLMEThr_task == NULL); \ - CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) \ + CHECK_PID_LEGALITY(pObj->MLMEThr_pid) \ up(&(pAd->mlme_semaphore)); \ } #define RTUSBCMDUp(pAd) \ { \ POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ - BUG_ON(pObj->RTUSBCmdThr_task == NULL); \ - CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) \ + CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) \ up(&(pAd->RTUSBCmd_semaphore)); \ } #endif diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c index 93af37e2d31..54b4b718f84 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c @@ -461,19 +461,19 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - strcpy(wrqu->name, "802.11"); + strlcpy(wrqu->name, "802.11", IFNAMSIZ); if(ieee->modulation & IEEE80211_CCK_MODULATION){ - strcat(wrqu->name, "b"); + strlcat(wrqu->name, "b", IFNAMSIZ); if(ieee->modulation & IEEE80211_OFDM_MODULATION) - strcat(wrqu->name, "/g"); + strlcat(wrqu->name, "/g", IFNAMSIZ); }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) - strcat(wrqu->name, "g"); + strlcat(wrqu->name, "g", IFNAMSIZ); if((ieee->state == IEEE80211_LINKED) || (ieee->state == IEEE80211_LINKED_SCANNING)) - strcat(wrqu->name," linked"); + strlcat(wrqu->name," link", IFNAMSIZ); else if(ieee->state != IEEE80211_NOLINK) - strcat(wrqu->name," link.."); + strlcat(wrqu->name," .....", IFNAMSIZ); return 0; diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig index 4b5552c5926..770f41280f2 100644 --- a/drivers/staging/rtl8192su/Kconfig +++ b/drivers/staging/rtl8192su/Kconfig @@ -1,6 +1,6 @@ config RTL8192SU tristate "RealTek RTL8192SU Wireless LAN NIC driver" depends on PCI - depends on WIRELESS_EXT && COMPAT_NET_DEV_OPS + depends on WIRELESS_EXT default N ---help--- diff --git a/drivers/staging/rtl8192su/ieee80211.h b/drivers/staging/rtl8192su/ieee80211.h index 0edb09a536f..ea973931803 100644 --- a/drivers/staging/rtl8192su/ieee80211.h +++ b/drivers/staging/rtl8192su/ieee80211.h @@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment( struct sk_buff *frag, int hdr_len); -extern int ieee80211_xmit(struct sk_buff *skb, +extern int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); extern void ieee80211_txb_free(struct ieee80211_txb *); diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h index 720bfcbfadc..5e3a2cbed2b 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h @@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment( struct sk_buff *frag, int hdr_len); -extern int ieee80211_xmit(struct sk_buff *skb, +extern int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); extern void 
ieee80211_txb_free(struct ieee80211_txb *); diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c index f408b4583b8..759032db4a3 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c @@ -118,7 +118,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv) #else ieee = (struct ieee80211_device *)dev->priv; #endif - dev->hard_start_xmit = ieee80211_xmit; memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv); ieee->dev = dev; diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c index 1f50c46dcb9..191dc3fbbe3 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c @@ -548,21 +548,21 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - strcpy(wrqu->name, "802.11"); + strlcpy(wrqu->name, "802.11", IFNAMSIZ); if(ieee->modulation & IEEE80211_CCK_MODULATION){ - strcat(wrqu->name, "b"); + strlcat(wrqu->name, "b", IFNAMSIZ); if(ieee->modulation & IEEE80211_OFDM_MODULATION) - strcat(wrqu->name, "/g"); + strlcat(wrqu->name, "/g", IFNAMSIZ); }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) - strcat(wrqu->name, "g"); + strlcat(wrqu->name, "g", IFNAMSIZ); if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) - strcat(wrqu->name, "/n"); + strlcat(wrqu->name, "/n", IFNAMSIZ); if((ieee->state == IEEE80211_LINKED) || (ieee->state == IEEE80211_LINKED_SCANNING)) - strcat(wrqu->name," linked"); + strlcat(wrqu->name, " link", IFNAMSIZ); else if(ieee->state != IEEE80211_NOLINK) - strcat(wrqu->name," link.."); + strlcat(wrqu->name, " .....", IFNAMSIZ); return 0; diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c index 7294572b990..cba12b84be5 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c @@ -618,7 +618,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u } } -int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) +int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) struct ieee80211_device *ieee = netdev_priv(dev); @@ -943,5 +943,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) return 1; } +EXPORT_SYMBOL(rtl8192_ieee80211_xmit); EXPORT_SYMBOL(ieee80211_txb_free); diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c index f1423d71449..70f81a8f129 100644 --- a/drivers/staging/rtl8192su/r8192U_core.c +++ b/drivers/staging/rtl8192su/r8192U_core.c @@ -12132,6 +12132,19 @@ static void HalUsbSetQueuePipeMapping8192SUsb(struct usb_interface *intf, struct } #endif +static const struct net_device_ops rtl8192_netdev_ops = { + .ndo_open = rtl8192_open, + .ndo_stop = rtl8192_close, + .ndo_get_stats = rtl8192_stats, + .ndo_tx_timeout = tx_timeout, + .ndo_do_ioctl = rtl8192_ioctl, + .ndo_set_multicast_list = r8192_set_multicast, + .ndo_set_mac_address = r8192_set_mac_adr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_start_xmit = rtl8192_ieee80211_xmit, +}; + #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) static int __devinit rtl8192_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 
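[Editor's note] Both wireless get_name handlers above switch from strcpy/strcat to their length-bounded counterparts so the assembled mode string can never run past the IFNAMSIZ-sized wrqu->name buffer. strlcpy/strlcat are kernel helpers, so the sketch below carries a minimal local strlcat equivalent just to make the pattern build as plain C:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16               /* same bound the drivers use */

/* Minimal strlcat stand-in: append src, never writing past size bytes. */
static void lcat(char *dst, const char *src, size_t size)
{
        size_t len = strlen(dst);

        if (len < size - 1)
                snprintf(dst + len, size - len, "%s", src);
}

int main(void)
{
        char name[IFNAMSIZ];
        int cck = 1, ofdm = 1, linked = 1;  /* pretend b/g hardware, associated */

        snprintf(name, sizeof(name), "802.11");
        if (cck) {
                lcat(name, "b", sizeof(name));
                if (ofdm)
                        lcat(name, "/g", sizeof(name));
        } else if (ofdm) {
                lcat(name, "g", sizeof(name));
        }
        if (linked)
                lcat(name, " link", sizeof(name));

        printf("%s\n", name);     /* "802.11b/g link", always under IFNAMSIZ bytes */
        return 0;
}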
@@ -12186,15 +12199,7 @@ static void * __devinit rtl8192_usb_probe(struct usb_device *udev, priv->ops = &rtl8192u_ops; #endif - dev->open = rtl8192_open; - dev->stop = rtl8192_close; - //dev->hard_start_xmit = rtl8192_8023_hard_start_xmit; - dev->tx_timeout = tx_timeout; - //dev->wireless_handlers = &r8192_wx_handlers_def; - dev->do_ioctl = rtl8192_ioctl; - dev->set_multicast_list = r8192_set_multicast; - dev->set_mac_address = r8192_set_mac_adr; - dev->get_stats = rtl8192_stats; + dev->netdev_ops = &rtl8192_netdev_ops; //DMESG("Oops: i'm coming\n"); #if WIRELESS_EXT >= 12 diff --git a/drivers/staging/rtl8192su/r8192U_pm.c b/drivers/staging/rtl8192su/r8192U_pm.c index 92c95aa3663..b1531a8d0cd 100644 --- a/drivers/staging/rtl8192su/r8192U_pm.c +++ b/drivers/staging/rtl8192su/r8192U_pm.c @@ -35,7 +35,9 @@ int rtl8192U_suspend(struct usb_interface *intf, pm_message_t state) return 0; } - dev->stop(dev); + if (dev->netdev_ops->ndo_stop) + dev->netdev_ops->ndo_stop(dev); + mdelay(10); netif_device_detach(dev); @@ -61,7 +63,9 @@ int rtl8192U_resume (struct usb_interface *intf) } netif_device_attach(dev); - dev->open(dev); + + if (dev->netdev_ops->ndo_open) + dev->netdev_ops->ndo_open(dev); } return 0; diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c index 90b29b56463..0fdf8c6dc64 100644 --- a/drivers/staging/serqt_usb2/serqt_usb2.c +++ b/drivers/staging/serqt_usb2/serqt_usb2.c @@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb) if (port_paranoia_check(port, __func__) != 0) { dbg("%s - port_paranoia_check, exiting\n", __func__); qt_port->ReadBulkStopped = 1; - return; + goto exit; } if (!serial) { dbg("%s - bad serial pointer, exiting\n", __func__); - return; + goto exit; } if (qt_port->closePending == 1) { /* Were closing , stop reading */ dbg("%s - (qt_port->closepending == 1\n", __func__); qt_port->ReadBulkStopped = 1; - return; + goto exit; } /* @@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb) */ if (qt_port->RxHolding == 1) { qt_port->ReadBulkStopped = 1; - return; + goto exit; } if (urb->status) { @@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb) dbg("%s - nonzero read bulk status received: %d\n", __func__, urb->status); - return; + goto exit; } if (tty && RxCount) { @@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb) } schedule_work(&port->work); +exit: + tty_kref_put(tty); } /* @@ -736,6 +738,11 @@ static int qt_startup(struct usb_serial *serial) if (!qt_port) { dbg("%s: kmalloc for quatech_port (%d) failed!.", __func__, i); + for(--i; i >= 0; i--) { + port = serial->port[i]; + kfree(usb_get_serial_port_data(port)); + usb_set_serial_port_data(port, NULL); + } return -ENOMEM; } spin_lock_init(&qt_port->lock); @@ -866,7 +873,7 @@ static void qt_release(struct usb_serial *serial) } -int qt_open(struct tty_struct *tty, +static int qt_open(struct tty_struct *tty, struct usb_serial_port *port, struct file *filp) { struct usb_serial *serial; @@ -1041,17 +1048,19 @@ static void qt_block_until_empty(struct tty_struct *tty, } } -static void qt_close(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static void qt_close(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct quatech_port *qt_port; struct quatech_port *port0; + struct tty_struct *tty; int status; unsigned int index; status = 0; dbg("%s - port %d\n", __func__, port->number); + + tty = tty_port_tty_get(&port->port); index = tty->index - serial->minor; qt_port 
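[Editor's note] The r8192U_pm hunks above show the visible cost of the net_device_ops conversion: suspend/resume can no longer call dev->stop()/dev->open() directly and instead dispatch through dev->netdev_ops, checking each hook before use. A stripped-down user-space model of that dispatch; the struct names here are invented stand-ins, not the kernel's:

#include <stdio.h>

struct fake_dev;

struct fake_dev_ops {
        int (*ndo_open)(struct fake_dev *dev);
        int (*ndo_stop)(struct fake_dev *dev);
};

struct fake_dev {
        const char *name;
        const struct fake_dev_ops *ops;
};

static int demo_open(struct fake_dev *dev) { printf("%s: open\n", dev->name); return 0; }
static int demo_stop(struct fake_dev *dev) { printf("%s: stop\n", dev->name); return 0; }

static const struct fake_dev_ops demo_ops = {
        .ndo_open = demo_open,
        .ndo_stop = demo_stop,
};

static void suspend_dev(struct fake_dev *dev)
{
        if (dev->ops->ndo_stop)   /* a hook may legitimately be absent */
                dev->ops->ndo_stop(dev);
}

static void resume_dev(struct fake_dev *dev)
{
        if (dev->ops->ndo_open)
                dev->ops->ndo_open(dev);
}

int main(void)
{
        struct fake_dev dev = { "wlan0", &demo_ops };

        suspend_dev(&dev);
        resume_dev(&dev);
        return 0;
}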
= qt_get_port_private(port); @@ -1066,6 +1075,7 @@ static void qt_close(struct tty_struct *tty, struct usb_serial_port *port, /* wait up to for transmitter to empty */ if (serial->dev) qt_block_until_empty(tty, qt_port); + tty_kref_put(tty); /* Close uart channel */ status = qt_close_channel(serial, index); diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c index cfdaac9b747..a137c78fac0 100644 --- a/drivers/staging/stlc45xx/stlc45xx.c +++ b/drivers/staging/stlc45xx/stlc45xx.c @@ -2235,24 +2235,6 @@ static void stlc45xx_op_remove_interface(struct ieee80211_hw *hw, stlc45xx_debug(DEBUG_FUNC, "%s", __func__); } -static int stlc45xx_op_config_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_if_conf *conf) -{ - struct stlc45xx *stlc = hw->priv; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - memcpy(stlc->bssid, conf->bssid, ETH_ALEN); - stlc45xx_tx_setup(stlc); - - mutex_unlock(&stlc->mutex); - - return 0; -} - static int stlc45xx_op_config(struct ieee80211_hw *hw, u32 changed) { struct stlc45xx *stlc = hw->priv; @@ -2295,6 +2277,14 @@ static void stlc45xx_op_bss_info_changed(struct ieee80211_hw *hw, { struct stlc45xx *stlc = hw->priv; + stlc45xx_debug(DEBUG_FUNC, "%s", __func__); + mutex_lock(&stlc->mutex); + + memcpy(stlc->bssid, info->bssid, ETH_ALEN); + stlc45xx_tx_setup(stlc); + + mutex_unlock(&stlc->mutex); + if (changed & BSS_CHANGED_ASSOC) { stlc->associated = info->assoc; if (info->assoc) @@ -2357,7 +2347,6 @@ static const struct ieee80211_ops stlc45xx_ops = { .add_interface = stlc45xx_op_add_interface, .remove_interface = stlc45xx_op_remove_interface, .config = stlc45xx_op_config, - .config_interface = stlc45xx_op_config_interface, .configure_filter = stlc45xx_op_configure_filter, .tx = stlc45xx_op_tx, .bss_info_changed = stlc45xx_op_bss_info_changed, diff --git a/drivers/staging/uc2322/Kconfig b/drivers/staging/uc2322/Kconfig deleted file mode 100644 index 2e0c6e79df2..00000000000 --- a/drivers/staging/uc2322/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -config USB_SERIAL_ATEN2011 - tristate "ATEN 2011 USB to serial device support" - depends on USB_SERIAL - default N - ---help--- - Say Y here if you want to use a ATEN 2011 dual port USB to serial - adapter. - - To compile this driver as a module, choose M here: the module will be - called aten2011. diff --git a/drivers/staging/uc2322/Makefile b/drivers/staging/uc2322/Makefile deleted file mode 100644 index 49c18d6e579..00000000000 --- a/drivers/staging/uc2322/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_USB_SERIAL_ATEN2011) += aten2011.o diff --git a/drivers/staging/uc2322/TODO b/drivers/staging/uc2322/TODO deleted file mode 100644 index c189a64c418..00000000000 --- a/drivers/staging/uc2322/TODO +++ /dev/null @@ -1,7 +0,0 @@ -TODO: - - checkpatch.pl cleanups - - remove dead and useless code (auditing the tty ioctls to - verify that they really are correct and needed.) - -Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and -Russell Lang <gsview@ghostgum.com.au>. diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c deleted file mode 100644 index 39d0926d1a9..00000000000 --- a/drivers/staging/uc2322/aten2011.c +++ /dev/null @@ -1,2430 +0,0 @@ -/* - * Aten 2011 USB serial driver for 4 port devices - * - * Copyright (C) 2000 Inside Out Networks - * Copyright (C) 2001-2002, 2009 Greg Kroah-Hartman <greg@kroah.com> - * Copyright (C) 2009 Novell Inc. 
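[Editor's note] The qt_startup() change above fixes a classic partial-failure leak: if allocating the private data for port i fails, the ports 0..i-1 that already received their structures must be freed and their driver data cleared before returning -ENOMEM. A self-contained sketch of that unwinding pattern, with plain malloc/free standing in for the USB-serial helpers:

#include <stdio.h>
#include <stdlib.h>

#define NPORTS 4

struct port_priv { int id; };

static int startup(struct port_priv *priv[], int nports, int fail_at)
{
        int i;

        for (i = 0; i < nports; i++) {
                /* fail_at simulates the kmalloc failure in qt_startup() */
                priv[i] = (i == fail_at) ? NULL : malloc(sizeof(*priv[i]));
                if (!priv[i]) {
                        /* unwind everything allocated so far */
                        for (--i; i >= 0; i--) {
                                free(priv[i]);
                                priv[i] = NULL;
                        }
                        return -1;        /* -ENOMEM in the driver */
                }
                priv[i]->id = i;
        }
        return 0;
}

int main(void)
{
        struct port_priv *priv[NPORTS] = { NULL };

        printf("startup: %d\n", startup(priv, NPORTS, 2));
        printf("port 0 after failure: %p\n", (void *)priv[0]);  /* NULL, nothing leaked */
        return 0;
}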
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/tty.h> -#include <linux/tty_driver.h> -#include <linux/tty_flip.h> -#include <linux/module.h> -#include <linux/serial.h> -#include <linux/uaccess.h> -#include <linux/usb.h> -#include <linux/usb/serial.h> - - -#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ -#define ZLP_REG2 0x3B /* Zero_Flag_Reg2 59 */ -#define ZLP_REG3 0x3C /* Zero_Flag_Reg3 60 */ -#define ZLP_REG4 0x3D /* Zero_Flag_Reg4 61 */ -#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ - -/* Interrupt Rotinue Defines */ -#define SERIAL_IIR_RLS 0x06 -#define SERIAL_IIR_RDA 0x04 -#define SERIAL_IIR_CTI 0x0c -#define SERIAL_IIR_THR 0x02 -#define SERIAL_IIR_MS 0x00 - -/* Emulation of the bit mask on the LINE STATUS REGISTER. */ -#define SERIAL_LSR_DR 0x0001 -#define SERIAL_LSR_OE 0x0002 -#define SERIAL_LSR_PE 0x0004 -#define SERIAL_LSR_FE 0x0008 -#define SERIAL_LSR_BI 0x0010 -#define SERIAL_LSR_THRE 0x0020 -#define SERIAL_LSR_TEMT 0x0040 -#define SERIAL_LSR_FIFOERR 0x0080 - -/* MSR bit defines(place holders) */ -#define ATEN_MSR_DELTA_CTS 0x10 -#define ATEN_MSR_DELTA_DSR 0x20 -#define ATEN_MSR_DELTA_RI 0x40 -#define ATEN_MSR_DELTA_CD 0x80 - -/* Serial Port register Address */ -#define RECEIVE_BUFFER_REGISTER ((__u16)(0x00)) -#define TRANSMIT_HOLDING_REGISTER ((__u16)(0x00)) -#define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01)) -#define INTERRUPT_IDENT_REGISTER ((__u16)(0x02)) -#define FIFO_CONTROL_REGISTER ((__u16)(0x02)) -#define LINE_CONTROL_REGISTER ((__u16)(0x03)) -#define MODEM_CONTROL_REGISTER ((__u16)(0x04)) -#define LINE_STATUS_REGISTER ((__u16)(0x05)) -#define MODEM_STATUS_REGISTER ((__u16)(0x06)) -#define SCRATCH_PAD_REGISTER ((__u16)(0x07)) -#define DIVISOR_LATCH_LSB ((__u16)(0x00)) -#define DIVISOR_LATCH_MSB ((__u16)(0x01)) - -#define SP1_REGISTER ((__u16)(0x00)) -#define CONTROL1_REGISTER ((__u16)(0x01)) -#define CLK_MULTI_REGISTER ((__u16)(0x02)) -#define CLK_START_VALUE_REGISTER ((__u16)(0x03)) -#define DCR1_REGISTER ((__u16)(0x04)) -#define GPIO_REGISTER ((__u16)(0x07)) - -#define SERIAL_LCR_DLAB ((__u16)(0x0080)) - -/* - * URB POOL related defines - */ -#define NUM_URBS 16 /* URB Count */ -#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ - -#define USB_VENDOR_ID_ATENINTL 0x0557 -#define ATENINTL_DEVICE_ID_2011 0x2011 -#define ATENINTL_DEVICE_ID_7820 0x7820 - -static struct usb_device_id id_table[] = { - { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_2011) }, - { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_7820) }, - { } /* terminating entry */ -}; -MODULE_DEVICE_TABLE(usb, id_table); - -/* This structure holds all of the local port information */ -struct ATENINTL_port { - int port_num; /*Actual port number in the device(1,2,etc)*/ - __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ - unsigned char *bulk_out_buffer; /* buffer used for the bulk out endpoint */ - struct urb *write_urb; /* write URB for this port */ - __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ - unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ - struct urb *read_urb; /* read URB for this port */ - __u8 shadowLCR; /* last LCR value received */ - __u8 shadowMCR; /* last MCR value received */ - char open; - 
char chaseResponsePending; - wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ - wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ - struct async_icount icount; - struct usb_serial_port *port; /* loop back to the owner of this object */ - /*Offsets*/ - __u8 SpRegOffset; - __u8 ControlRegOffset; - __u8 DcrRegOffset; - /* for processing control URBS in interrupt context */ - struct urb *control_urb; - char *ctrl_buf; - int MsrLsr; - - struct urb *write_urb_pool[NUM_URBS]; - /* we pass a pointer to this as the arguement sent to cypress_set_termios old_termios */ - struct ktermios tmp_termios; /* stores the old termios settings */ - spinlock_t lock; /* private lock */ -}; - -/* This structure holds all of the individual serial device information */ -struct ATENINTL_serial { - __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ - unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ - struct urb *interrupt_read_urb; /* our interrupt urb */ - __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ - unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ - struct urb *read_urb; /* our bulk read urb */ - __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ - struct usb_serial *serial; /* loop back to the owner of this object */ - int ATEN2011_spectrum_2or4ports; /* this says the number of ports in the device */ - /* Indicates about the no.of opened ports of an individual USB-serial adapater. */ - unsigned int NoOfOpenPorts; - /* a flag for Status endpoint polling */ - unsigned char status_polling_started; -}; - -static void ATEN2011_set_termios(struct tty_struct *tty, - struct usb_serial_port *port, - struct ktermios *old_termios); -static void ATEN2011_change_port_settings(struct tty_struct *tty, - struct ATENINTL_port *ATEN2011_port, - struct ktermios *old_termios); - -/************************************* - * Bit definitions for each register * - *************************************/ -#define LCR_BITS_5 0x00 /* 5 bits/char */ -#define LCR_BITS_6 0x01 /* 6 bits/char */ -#define LCR_BITS_7 0x02 /* 7 bits/char */ -#define LCR_BITS_8 0x03 /* 8 bits/char */ -#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ - -#define LCR_STOP_1 0x00 /* 1 stop bit */ -#define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ -#define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ -#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ - -#define LCR_PAR_NONE 0x00 /* No parity */ -#define LCR_PAR_ODD 0x08 /* Odd parity */ -#define LCR_PAR_EVEN 0x18 /* Even parity */ -#define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ -#define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ -#define LCR_PAR_MASK 0x38 /* Mask for parity field */ - -#define LCR_SET_BREAK 0x40 /* Set Break condition */ -#define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ - -#define MCR_DTR 0x01 /* Assert DTR */ -#define MCR_RTS 0x02 /* Assert RTS */ -#define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ -#define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ -#define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ -#define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ - -#define ATEN2011_MSR_CTS 0x10 /* Current state of CTS */ -#define ATEN2011_MSR_DSR 0x20 /* Current state of DSR */ -#define ATEN2011_MSR_RI 0x40 /* Current state of RI */ -#define ATEN2011_MSR_CD 0x80 /* Current state of CD */ - - -static int debug; - -/* 
- * Version Information - */ -#define DRIVER_VERSION "2.0" -#define DRIVER_DESC "ATENINTL 2011 USB Serial Adapter" - -/* - * Defines used for sending commands to port - */ - -#define ATEN_WDR_TIMEOUT (50) /* default urb timeout */ - -/* Requests */ -#define ATEN_RD_RTYPE 0xC0 -#define ATEN_WR_RTYPE 0x40 -#define ATEN_RDREQ 0x0D -#define ATEN_WRREQ 0x0E -#define ATEN_CTRL_TIMEOUT 500 -#define VENDOR_READ_LENGTH (0x01) - -/* set to 1 for RS485 mode and 0 for RS232 mode */ -/* FIXME make this somehow dynamic and not build time specific */ -static int RS485mode; - -static int set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val) -{ - struct usb_device *dev = port->serial->dev; - val = val & 0x00ff; - - dbg("%s: is %x, value %x", __func__, reg, val); - - return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, - ATEN_WR_RTYPE, val, reg, NULL, 0, - ATEN_WDR_TIMEOUT); -} - -static int get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val) -{ - struct usb_device *dev = port->serial->dev; - int ret; - - ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, - ATEN_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, - ATEN_WDR_TIMEOUT); - dbg("%s: offset is %x, return val %x", __func__, reg, *val); - *val = (*val) & 0x00ff; - return ret; -} - -static int set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val) -{ - struct usb_device *dev = port->serial->dev; - struct ATENINTL_serial *a_serial; - __u16 minor; - - a_serial = usb_get_serial_data(port->serial); - minor = port->serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - val = val & 0x00ff; - - /* - * For the UART control registers, - * the application number need to be Or'ed - */ - if (a_serial->ATEN2011_spectrum_2or4ports == 4) - val |= (((__u16)port->number - minor) + 1) << 8; - else { - if (((__u16) port->number - minor) == 0) - val |= (((__u16)port->number - minor) + 1) << 8; - else - val |= (((__u16)port->number - minor) + 2) << 8; - } - dbg("%s: application number is %x", __func__, val); - - return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, - ATEN_WR_RTYPE, val, reg, NULL, 0, - ATEN_WDR_TIMEOUT); -} - -static int get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val) -{ - struct usb_device *dev = port->serial->dev; - int ret = 0; - __u16 wval; - struct ATENINTL_serial *a_serial; - __u16 minor = port->serial->minor; - - a_serial = usb_get_serial_data(port->serial); - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - - /* wval is same as application number */ - if (a_serial->ATEN2011_spectrum_2or4ports == 4) - wval = (((__u16)port->number - minor) + 1) << 8; - else { - if (((__u16) port->number - minor) == 0) - wval = (((__u16) port->number - minor) + 1) << 8; - else - wval = (((__u16) port->number - minor) + 2) << 8; - } - dbg("%s: application number is %x", __func__, wval); - ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, - ATEN_RD_RTYPE, wval, reg, val, VENDOR_READ_LENGTH, - ATEN_WDR_TIMEOUT); - *val = (*val) & 0x00ff; - return ret; -} - -static int handle_newMsr(struct ATENINTL_port *port, __u8 newMsr) -{ - struct ATENINTL_port *ATEN2011_port; - struct async_icount *icount; - ATEN2011_port = port; - icount = &ATEN2011_port->icount; - if (newMsr & - (ATEN_MSR_DELTA_CTS | ATEN_MSR_DELTA_DSR | ATEN_MSR_DELTA_RI | - ATEN_MSR_DELTA_CD)) { - icount = &ATEN2011_port->icount; - - /* update input line counters */ - if (newMsr & ATEN_MSR_DELTA_CTS) - icount->cts++; - if (newMsr & ATEN_MSR_DELTA_DSR) - icount->dsr++; - if (newMsr & ATEN_MSR_DELTA_CD) - 
icount->dcd++; - if (newMsr & ATEN_MSR_DELTA_RI) - icount->rng++; - } - - return 0; -} - -static int handle_newLsr(struct ATENINTL_port *port, __u8 newLsr) -{ - struct async_icount *icount; - - dbg("%s - %02x", __func__, newLsr); - - if (newLsr & SERIAL_LSR_BI) { - /* - * Parity and Framing errors only count if they occur exclusive - * of a break being received. - */ - newLsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI); - } - - /* update input line counters */ - icount = &port->icount; - if (newLsr & SERIAL_LSR_BI) - icount->brk++; - if (newLsr & SERIAL_LSR_OE) - icount->overrun++; - if (newLsr & SERIAL_LSR_PE) - icount->parity++; - if (newLsr & SERIAL_LSR_FE) - icount->frame++; - - return 0; -} - -static void ATEN2011_control_callback(struct urb *urb) -{ - unsigned char *data; - struct ATENINTL_port *ATEN2011_port; - __u8 regval = 0x0; - - switch (urb->status) { - case 0: - /* success */ - break; - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* this urb is terminated, clean up */ - dbg("%s - urb shutting down with status: %d", __func__, - urb->status); - return; - default: - dbg("%s - nonzero urb status received: %d", __func__, - urb->status); - goto exit; - } - - ATEN2011_port = (struct ATENINTL_port *)urb->context; - - dbg("%s urb buffer size is %d", __func__, urb->actual_length); - dbg("%s ATEN2011_port->MsrLsr is %d port %d", __func__, - ATEN2011_port->MsrLsr, ATEN2011_port->port_num); - data = urb->transfer_buffer; - regval = (__u8) data[0]; - dbg("%s data is %x", __func__, regval); - if (ATEN2011_port->MsrLsr == 0) - handle_newMsr(ATEN2011_port, regval); - else if (ATEN2011_port->MsrLsr == 1) - handle_newLsr(ATEN2011_port, regval); - -exit: - return; -} - -static int ATEN2011_get_reg(struct ATENINTL_port *ATEN, __u16 Wval, __u16 reg, - __u16 *val) -{ - struct usb_device *dev = ATEN->port->serial->dev; - struct usb_ctrlrequest *dr = NULL; - unsigned char *buffer = NULL; - int ret = 0; - buffer = (__u8 *) ATEN->ctrl_buf; - - dr = (void *)(buffer + 2); - dr->bRequestType = ATEN_RD_RTYPE; - dr->bRequest = ATEN_RDREQ; - dr->wValue = cpu_to_le16(Wval); - dr->wIndex = cpu_to_le16(reg); - dr->wLength = cpu_to_le16(2); - - usb_fill_control_urb(ATEN->control_urb, dev, usb_rcvctrlpipe(dev, 0), - (unsigned char *)dr, buffer, 2, - ATEN2011_control_callback, ATEN); - ATEN->control_urb->transfer_buffer_length = 2; - ret = usb_submit_urb(ATEN->control_urb, GFP_ATOMIC); - return ret; -} - -static void ATEN2011_interrupt_callback(struct urb *urb) -{ - int result; - int length; - struct ATENINTL_port *ATEN2011_port; - struct ATENINTL_serial *ATEN2011_serial; - struct usb_serial *serial; - __u16 Data; - unsigned char *data; - __u8 sp[5], st; - int i; - __u16 wval; - int minor; - - dbg("%s", " : Entering"); - - ATEN2011_serial = (struct ATENINTL_serial *)urb->context; - - switch (urb->status) { - case 0: - /* success */ - break; - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* this urb is terminated, clean up */ - dbg("%s - urb shutting down with status: %d", __func__, - urb->status); - return; - default: - dbg("%s - nonzero urb status received: %d", __func__, - urb->status); - goto exit; - } - length = urb->actual_length; - data = urb->transfer_buffer; - - serial = ATEN2011_serial->serial; - - /* ATENINTL get 5 bytes - * Byte 1 IIR Port 1 (port.number is 0) - * Byte 2 IIR Port 2 (port.number is 1) - * Byte 3 IIR Port 3 (port.number is 2) - * Byte 4 IIR Port 4 (port.number is 3) - * Byte 5 FIFO status for both */ - - if (length && length > 5) { - dbg("%s", "Wrong data !!!"); - 
return; - } - - /* MATRIX */ - if (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 4) { - sp[0] = (__u8) data[0]; - sp[1] = (__u8) data[1]; - sp[2] = (__u8) data[2]; - sp[3] = (__u8) data[3]; - st = (__u8) data[4]; - } else { - sp[0] = (__u8) data[0]; - sp[1] = (__u8) data[2]; - /* sp[2]=(__u8)data[2]; */ - /* sp[3]=(__u8)data[3]; */ - st = (__u8) data[4]; - - } - for (i = 0; i < serial->num_ports; i++) { - ATEN2011_port = usb_get_serial_port_data(serial->port[i]); - minor = serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) - && (i != 0)) - wval = - (((__u16) serial->port[i]->number - - (__u16) (minor)) + 2) << 8; - else - wval = - (((__u16) serial->port[i]->number - - (__u16) (minor)) + 1) << 8; - if (ATEN2011_port->open != 0) { - if (sp[i] & 0x01) { - dbg("SP%d No Interrupt !!!", i); - } else { - switch (sp[i] & 0x0f) { - case SERIAL_IIR_RLS: - dbg("Serial Port %d: Receiver status error or address bit detected in 9-bit mode", i); - ATEN2011_port->MsrLsr = 1; - ATEN2011_get_reg(ATEN2011_port, wval, - LINE_STATUS_REGISTER, - &Data); - break; - case SERIAL_IIR_MS: - dbg("Serial Port %d: Modem status change", i); - ATEN2011_port->MsrLsr = 0; - ATEN2011_get_reg(ATEN2011_port, wval, - MODEM_STATUS_REGISTER, - &Data); - break; - } - } - } - - } -exit: - if (ATEN2011_serial->status_polling_started == 0) - return; - - result = usb_submit_urb(urb, GFP_ATOMIC); - if (result) { - dev_err(&urb->dev->dev, - "%s - Error %d submitting interrupt urb\n", - __func__, result); - } - - return; -} - -static void ATEN2011_bulk_in_callback(struct urb *urb) -{ - int status; - unsigned char *data; - struct usb_serial *serial; - struct usb_serial_port *port; - struct ATENINTL_serial *ATEN2011_serial; - struct ATENINTL_port *ATEN2011_port; - struct tty_struct *tty; - - if (urb->status) { - dbg("nonzero read bulk status received: %d", urb->status); - return; - } - - ATEN2011_port = (struct ATENINTL_port *)urb->context; - - port = (struct usb_serial_port *)ATEN2011_port->port; - serial = port->serial; - - dbg("%s", "Entering..."); - - data = urb->transfer_buffer; - ATEN2011_serial = usb_get_serial_data(serial); - - if (urb->actual_length) { - tty = tty_port_tty_get(&ATEN2011_port->port->port); - if (tty) { - tty_buffer_request_room(tty, urb->actual_length); - tty_insert_flip_string(tty, data, urb->actual_length); - tty_flip_buffer_push(tty); - tty_kref_put(tty); - } - - ATEN2011_port->icount.rx += urb->actual_length; - dbg("ATEN2011_port->icount.rx is %d:", - ATEN2011_port->icount.rx); - } - - if (!ATEN2011_port->read_urb) { - dbg("%s", "URB KILLED !!!"); - return; - } - - if (ATEN2011_port->read_urb->status != -EINPROGRESS) { - ATEN2011_port->read_urb->dev = serial->dev; - - status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); - if (status) - dbg("usb_submit_urb(read bulk) failed, status = %d", status); - } -} - -static void ATEN2011_bulk_out_data_callback(struct urb *urb) -{ - struct ATENINTL_port *ATEN2011_port; - struct tty_struct *tty; - - if (urb->status) { - dbg("nonzero write bulk status received:%d", urb->status); - return; - } - - ATEN2011_port = (struct ATENINTL_port *)urb->context; - - dbg("%s", "Entering ........."); - - tty = tty_port_tty_get(&ATEN2011_port->port->port); - - if (tty && ATEN2011_port->open) - /* tell the tty driver that something has changed */ - tty_wakeup(tty); - - /* schedule_work(&ATEN2011_port->port->work); */ - tty_kref_put(tty); - -} - -#ifdef ATENSerialProbe -static int ATEN2011_serial_probe(struct 
usb_serial *serial, - const struct usb_device_id *id) -{ - - /*need to implement the mode_reg reading and updating\ - structures usb_serial_ device_type\ - (i.e num_ports, num_bulkin,bulkout etc) */ - /* Also we can update the changes attach */ - return 1; -} -#endif - -static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) -{ - int response; - int j; - struct usb_serial *serial; - struct urb *urb; - __u16 Data; - int status; - struct ATENINTL_serial *ATEN2011_serial; - struct ATENINTL_port *ATEN2011_port; - struct ktermios tmp_termios; - int minor; - - serial = port->serial; - - ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return -ENODEV; - - ATEN2011_serial = usb_get_serial_data(serial); - if (ATEN2011_serial == NULL) - return -ENODEV; - - /* increment the number of opened ports counter here */ - ATEN2011_serial->NoOfOpenPorts++; - - usb_clear_halt(serial->dev, port->write_urb->pipe); - usb_clear_halt(serial->dev, port->read_urb->pipe); - - /* Initialising the write urb pool */ - for (j = 0; j < NUM_URBS; ++j) { - urb = usb_alloc_urb(0, GFP_ATOMIC); - ATEN2011_port->write_urb_pool[j] = urb; - - if (urb == NULL) { - err("No more urbs???"); - continue; - } - - urb->transfer_buffer = NULL; - urb->transfer_buffer = - kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); - if (!urb->transfer_buffer) { - err("%s-out of memory for urb buffers.", __func__); - continue; - } - } - -/***************************************************************************** - * Initialize ATEN2011 -- Write Init values to corresponding Registers - * - * Register Index - * 1 : IER - * 2 : FCR - * 3 : LCR - * 4 : MCR - * - * 0x08 : SP1/2 Control Reg - *****************************************************************************/ - -/* NEED to check the fallowing Block */ - - Data = 0x0; - status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); - if (status < 0) { - dbg("Reading Spreg failed"); - return -1; - } - Data |= 0x80; - status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); - if (status < 0) { - dbg("writing Spreg failed"); - return -1; - } - - Data &= ~0x80; - status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); - if (status < 0) { - dbg("writing Spreg failed"); - return -1; - } - -/* End of block to be checked */ -/**************************CHECK***************************/ - - if (RS485mode == 0) - Data = 0xC0; - else - Data = 0x00; - status = set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); - if (status < 0) { - dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status); - return -1; - } else - dbg("SCRATCH_PAD_REGISTER Writing success status%d", status); - -/**************************CHECK***************************/ - - Data = 0x0; - status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); - if (status < 0) { - dbg("Reading Controlreg failed"); - return -1; - } - Data |= 0x08; /* Driver done bit */ - Data |= 0x20; /* rx_disable */ - status = 0; - status = - set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); - if (status < 0) { - dbg("writing Controlreg failed"); - return -1; - } - /* - * do register settings here - * Set all regs to the device default values. - * First Disable all interrupts. 
- */ - - Data = 0x00; - status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); - if (status < 0) { - dbg("disableing interrupts failed"); - return -1; - } - /* Set FIFO_CONTROL_REGISTER to the default value */ - Data = 0x00; - status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); - if (status < 0) { - dbg("Writing FIFO_CONTROL_REGISTER failed"); - return -1; - } - - Data = 0xcf; /* chk */ - status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); - if (status < 0) { - dbg("Writing FIFO_CONTROL_REGISTER failed"); - return -1; - } - - Data = 0x03; /* LCR_BITS_8 */ - status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - ATEN2011_port->shadowLCR = Data; - - Data = 0x0b; /* MCR_DTR|MCR_RTS|MCR_MASTER_IE */ - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - ATEN2011_port->shadowMCR = Data; - -#ifdef Check - Data = 0x00; - status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); - ATEN2011_port->shadowLCR = Data; - - Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */ - status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - - Data = 0x0c; - status = set_uart_reg(port, DIVISOR_LATCH_LSB, Data); - - Data = 0x0; - status = set_uart_reg(port, DIVISOR_LATCH_MSB, Data); - - Data = 0x00; - status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); - -/* Data = ATEN2011_port->shadowLCR; */ /* data latch disable */ - Data = Data & ~SERIAL_LCR_DLAB; - status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - ATEN2011_port->shadowLCR = Data; -#endif - /* clearing Bulkin and Bulkout Fifo */ - Data = 0x0; - status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); - - Data = Data | 0x0c; - status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); - - Data = Data & ~0x0c; - status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); - /* Finally enable all interrupts */ - Data = 0x0; - Data = 0x0c; - status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); - - /* clearing rx_disable */ - Data = 0x0; - status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); - Data = Data & ~0x20; - status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); - - /* rx_negate */ - Data = 0x0; - status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); - Data = Data | 0x10; - status = 0; - status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); - - /* - * Check to see if we've set up our endpoint info yet - * (can't set it up in ATEN2011_startup as the structures - * were not set up at that time.) 
- */ - if (ATEN2011_serial->NoOfOpenPorts == 1) { - /* start the status polling here */ - ATEN2011_serial->status_polling_started = 1; - /* If not yet set, Set here */ - ATEN2011_serial->interrupt_in_buffer = - serial->port[0]->interrupt_in_buffer; - ATEN2011_serial->interrupt_in_endpoint = - serial->port[0]->interrupt_in_endpointAddress; - ATEN2011_serial->interrupt_read_urb = - serial->port[0]->interrupt_in_urb; - - /* set up interrupt urb */ - usb_fill_int_urb(ATEN2011_serial->interrupt_read_urb, - serial->dev, - usb_rcvintpipe(serial->dev, - ATEN2011_serial-> - interrupt_in_endpoint), - ATEN2011_serial->interrupt_in_buffer, - ATEN2011_serial->interrupt_read_urb-> - transfer_buffer_length, - ATEN2011_interrupt_callback, ATEN2011_serial, - ATEN2011_serial->interrupt_read_urb->interval); - - /* start interrupt read for ATEN2011 * - * will continue as long as ATEN2011 is connected */ - - response = - usb_submit_urb(ATEN2011_serial->interrupt_read_urb, - GFP_KERNEL); - if (response) { - dbg("%s - Error %d submitting interrupt urb", - __func__, response); - } - - } - - /* - * See if we've set up our endpoint info yet - * (can't set it up in ATEN2011_startup as the - * structures were not set up at that time.) - */ - - dbg("port number is %d", port->number); - dbg("serial number is %d", port->serial->minor); - dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress); - dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress); - dbg("Interrupt endpoint is %d", - port->interrupt_in_endpointAddress); - dbg("port's number in the device is %d", ATEN2011_port->port_num); - ATEN2011_port->bulk_in_buffer = port->bulk_in_buffer; - ATEN2011_port->bulk_in_endpoint = port->bulk_in_endpointAddress; - ATEN2011_port->read_urb = port->read_urb; - ATEN2011_port->bulk_out_endpoint = port->bulk_out_endpointAddress; - - minor = port->serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - - /* set up our bulk in urb */ - if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) - && (((__u16) port->number - (__u16) (minor)) != 0)) { - usb_fill_bulk_urb(ATEN2011_port->read_urb, serial->dev, - usb_rcvbulkpipe(serial->dev, - (port-> - bulk_in_endpointAddress + - 2)), port->bulk_in_buffer, - ATEN2011_port->read_urb-> - transfer_buffer_length, - ATEN2011_bulk_in_callback, ATEN2011_port); - } else - usb_fill_bulk_urb(ATEN2011_port->read_urb, - serial->dev, - usb_rcvbulkpipe(serial->dev, - port-> - bulk_in_endpointAddress), - port->bulk_in_buffer, - ATEN2011_port->read_urb-> - transfer_buffer_length, - ATEN2011_bulk_in_callback, ATEN2011_port); - - dbg("ATEN2011_open: bulkin endpoint is %d", - port->bulk_in_endpointAddress); - response = usb_submit_urb(ATEN2011_port->read_urb, GFP_KERNEL); - if (response) { - err("%s - Error %d submitting control urb", __func__, - response); - } - - /* initialize our wait queues */ - init_waitqueue_head(&ATEN2011_port->wait_chase); - init_waitqueue_head(&ATEN2011_port->wait_command); - - /* initialize our icount structure */ - memset(&(ATEN2011_port->icount), 0x00, sizeof(ATEN2011_port->icount)); - - /* initialize our port settings */ - ATEN2011_port->shadowMCR = MCR_MASTER_IE; /* Must set to enable ints! 
*/ - ATEN2011_port->chaseResponsePending = 0; - /* send a open port command */ - ATEN2011_port->open = 1; - /* ATEN2011_change_port_settings(ATEN2011_port,old_termios); */ - /* Setup termios */ - ATEN2011_set_termios(tty, port, &tmp_termios); - ATEN2011_port->icount.tx = 0; - ATEN2011_port->icount.rx = 0; - - dbg("usb_serial serial:%x ATEN2011_port:%x\nATEN2011_serial:%x usb_serial_port port:%x", - (unsigned int)serial, (unsigned int)ATEN2011_port, - (unsigned int)ATEN2011_serial, (unsigned int)port); - - return 0; - -} - -static int ATEN2011_chars_in_buffer(struct tty_struct *tty) -{ - struct usb_serial_port *port = tty->driver_data; - int i; - int chars = 0; - struct ATENINTL_port *ATEN2011_port; - - /* dbg("%s"," ATEN2011_chars_in_buffer:entering ..........."); */ - - ATEN2011_port = usb_get_serial_port_data(port); - if (ATEN2011_port == NULL) { - dbg("%s", "ATEN2011_break:leaving ..........."); - return -1; - } - - for (i = 0; i < NUM_URBS; ++i) - if (ATEN2011_port->write_urb_pool[i]->status == -EINPROGRESS) - chars += URB_TRANSFER_BUFFER_SIZE; - - dbg("%s - returns %d", __func__, chars); - return chars; - -} - -static void ATEN2011_block_until_tx_empty(struct tty_struct *tty, - struct ATENINTL_port *ATEN2011_port) -{ - int timeout = HZ / 10; - int wait = 30; - int count; - - while (1) { - count = ATEN2011_chars_in_buffer(tty); - - /* Check for Buffer status */ - if (count <= 0) - return; - - /* Block the thread for a while */ - interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, - timeout); - - /* No activity.. count down section */ - wait--; - if (wait == 0) { - dbg("%s - TIMEOUT", __func__); - return; - } else { - /* Reset timout value back to seconds */ - wait = 30; - } - } -} - -static void ATEN2011_close(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) -{ - struct usb_serial *serial; - struct ATENINTL_serial *ATEN2011_serial; - struct ATENINTL_port *ATEN2011_port; - int no_urbs; - __u16 Data; - - dbg("%s", "ATEN2011_close:entering..."); - serial = port->serial; - - /* take the Adpater and port's private data */ - ATEN2011_serial = usb_get_serial_data(serial); - ATEN2011_port = usb_get_serial_port_data(port); - if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) - return; - - if (serial->dev) { - /* flush and block(wait) until tx is empty */ - ATEN2011_block_until_tx_empty(tty, ATEN2011_port); - } - /* kill the ports URB's */ - for (no_urbs = 0; no_urbs < NUM_URBS; no_urbs++) - usb_kill_urb(ATEN2011_port->write_urb_pool[no_urbs]); - /* Freeing Write URBs */ - for (no_urbs = 0; no_urbs < NUM_URBS; ++no_urbs) { - kfree(ATEN2011_port->write_urb_pool[no_urbs]->transfer_buffer); - usb_free_urb(ATEN2011_port->write_urb_pool[no_urbs]); - } - /* While closing port, shutdown all bulk read, write * - * and interrupt read if they exists */ - if (serial->dev) { - if (ATEN2011_port->write_urb) { - dbg("%s", "Shutdown bulk write"); - usb_kill_urb(ATEN2011_port->write_urb); - } - if (ATEN2011_port->read_urb) { - dbg("%s", "Shutdown bulk read"); - usb_kill_urb(ATEN2011_port->read_urb); - } - if ((&ATEN2011_port->control_urb)) { - dbg("%s", "Shutdown control read"); - /* usb_kill_urb (ATEN2011_port->control_urb); */ - - } - } - /* if(ATEN2011_port->ctrl_buf != NULL) */ - /* kfree(ATEN2011_port->ctrl_buf); */ - /* decrement the no.of open ports counter of an individual USB-serial adapter. 
*/ - ATEN2011_serial->NoOfOpenPorts--; - dbg("NoOfOpenPorts in close%d:in port%d", - ATEN2011_serial->NoOfOpenPorts, port->number); - if (ATEN2011_serial->NoOfOpenPorts == 0) { - /* stop the stus polling here */ - ATEN2011_serial->status_polling_started = 0; - if (ATEN2011_serial->interrupt_read_urb) { - dbg("%s", "Shutdown interrupt_read_urb"); - /* ATEN2011_serial->interrupt_in_buffer=NULL; */ - /* usb_kill_urb (ATEN2011_serial->interrupt_read_urb); */ - } - } - if (ATEN2011_port->write_urb) { - /* if this urb had a transfer buffer already (old tx) free it */ - kfree(ATEN2011_port->write_urb->transfer_buffer); - usb_free_urb(ATEN2011_port->write_urb); - } - - /* clear the MCR & IER */ - Data = 0x00; - set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - Data = 0x00; - set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); - - ATEN2011_port->open = 0; - dbg("%s", "Leaving ............"); - -} - -static void ATEN2011_block_until_chase_response(struct tty_struct *tty, - struct ATENINTL_port - *ATEN2011_port) -{ - int timeout = 1 * HZ; - int wait = 10; - int count; - - while (1) { - count = ATEN2011_chars_in_buffer(tty); - - /* Check for Buffer status */ - if (count <= 0) { - ATEN2011_port->chaseResponsePending = 0; - return; - } - - /* Block the thread for a while */ - interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, - timeout); - /* No activity.. count down section */ - wait--; - if (wait == 0) { - dbg("%s - TIMEOUT", __func__); - return; - } else { - /* Reset timout value back to seconds */ - wait = 10; - } - } - -} - -static void ATEN2011_break(struct tty_struct *tty, int break_state) -{ - struct usb_serial_port *port = tty->driver_data; - unsigned char data; - struct usb_serial *serial; - struct ATENINTL_serial *ATEN2011_serial; - struct ATENINTL_port *ATEN2011_port; - - dbg("%s", "Entering ..........."); - dbg("ATEN2011_break: Start"); - - serial = port->serial; - - ATEN2011_serial = usb_get_serial_data(serial); - ATEN2011_port = usb_get_serial_port_data(port); - - if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) - return; - - /* flush and chase */ - ATEN2011_port->chaseResponsePending = 1; - - if (serial->dev) { - /* flush and block until tx is empty */ - ATEN2011_block_until_chase_response(tty, ATEN2011_port); - } - - if (break_state == -1) - data = ATEN2011_port->shadowLCR | LCR_SET_BREAK; - else - data = ATEN2011_port->shadowLCR & ~LCR_SET_BREAK; - - ATEN2011_port->shadowLCR = data; - dbg("ATEN2011_break ATEN2011_port->shadowLCR is %x", - ATEN2011_port->shadowLCR); - set_uart_reg(port, LINE_CONTROL_REGISTER, ATEN2011_port->shadowLCR); - - return; -} - -static int ATEN2011_write_room(struct tty_struct *tty) -{ - struct usb_serial_port *port = tty->driver_data; - int i; - int room = 0; - struct ATENINTL_port *ATEN2011_port; - - ATEN2011_port = usb_get_serial_port_data(port); - if (ATEN2011_port == NULL) { - dbg("%s", "ATEN2011_break:leaving ..........."); - return -1; - } - - for (i = 0; i < NUM_URBS; ++i) - if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) - room += URB_TRANSFER_BUFFER_SIZE; - - dbg("%s - returns %d", __func__, room); - return room; - -} - -static int ATEN2011_write(struct tty_struct *tty, struct usb_serial_port *port, - const unsigned char *data, int count) -{ - int status; - int i; - int bytes_sent = 0; - int transfer_size; - int minor; - - struct ATENINTL_port *ATEN2011_port; - struct usb_serial *serial; - struct ATENINTL_serial *ATEN2011_serial; - struct urb *urb; - const unsigned char *current_position = data; - unsigned char *data1; 
- dbg("%s", "entering ..........."); - - serial = port->serial; - - ATEN2011_port = usb_get_serial_port_data(port); - if (ATEN2011_port == NULL) { - dbg("%s", "ATEN2011_port is NULL"); - return -1; - } - - ATEN2011_serial = usb_get_serial_data(serial); - if (ATEN2011_serial == NULL) { - dbg("%s", "ATEN2011_serial is NULL"); - return -1; - } - - /* try to find a free urb in the list */ - urb = NULL; - - for (i = 0; i < NUM_URBS; ++i) { - if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) { - urb = ATEN2011_port->write_urb_pool[i]; - dbg("URB:%d", i); - break; - } - } - - if (urb == NULL) { - dbg("%s - no more free urbs", __func__); - goto exit; - } - - if (urb->transfer_buffer == NULL) { - urb->transfer_buffer = - kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); - - if (urb->transfer_buffer == NULL) { - err("%s no more kernel memory...", __func__); - goto exit; - } - } - transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); - - memcpy(urb->transfer_buffer, current_position, transfer_size); - /* usb_serial_debug_data (__FILE__, __func__, transfer_size, urb->transfer_buffer); */ - - /* fill urb with data and submit */ - minor = port->serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) - && (((__u16) port->number - (__u16) (minor)) != 0)) { - usb_fill_bulk_urb(urb, ATEN2011_serial->serial->dev, - usb_sndbulkpipe(ATEN2011_serial->serial->dev, - (port-> - bulk_out_endpointAddress) + - 2), urb->transfer_buffer, - transfer_size, - ATEN2011_bulk_out_data_callback, - ATEN2011_port); - } else - - usb_fill_bulk_urb(urb, - ATEN2011_serial->serial->dev, - usb_sndbulkpipe(ATEN2011_serial->serial->dev, - port-> - bulk_out_endpointAddress), - urb->transfer_buffer, transfer_size, - ATEN2011_bulk_out_data_callback, - ATEN2011_port); - - data1 = urb->transfer_buffer; - dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); - /* for(i=0;i < urb->actual_length;i++) */ - /* dbg("Data is %c ",data1[i]); */ - - /* send it down the pipe */ - status = usb_submit_urb(urb, GFP_ATOMIC); - - if (status) { - err("%s - usb_submit_urb(write bulk) failed with status = %d", - __func__, status); - bytes_sent = status; - goto exit; - } - bytes_sent = transfer_size; - ATEN2011_port->icount.tx += transfer_size; - dbg("ATEN2011_port->icount.tx is %d:", ATEN2011_port->icount.tx); - -exit: - return bytes_sent; -} - -static void ATEN2011_throttle(struct tty_struct *tty) -{ - struct usb_serial_port *port = tty->driver_data; - struct ATENINTL_port *ATEN2011_port; - int status; - - dbg("- port %d", port->number); - - ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return; - - if (!ATEN2011_port->open) { - dbg("%s", "port not opened"); - return; - } - - dbg("%s", "Entering .......... 
"); - - if (!tty) { - dbg("%s - no tty available", __func__); - return; - } - - /* if we are implementing XON/XOFF, send the stop character */ - if (I_IXOFF(tty)) { - unsigned char stop_char = STOP_CHAR(tty); - status = ATEN2011_write(tty, port, &stop_char, 1); - if (status <= 0) - return; - } - - /* if we are implementing RTS/CTS, toggle that line */ - if (tty->termios->c_cflag & CRTSCTS) { - ATEN2011_port->shadowMCR &= ~MCR_RTS; - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, - ATEN2011_port->shadowMCR); - if (status < 0) - return; - } - - return; -} - -static void ATEN2011_unthrottle(struct tty_struct *tty) -{ - struct usb_serial_port *port = tty->driver_data; - int status; - struct ATENINTL_port *ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return; - - if (!ATEN2011_port->open) { - dbg("%s - port not opened", __func__); - return; - } - - dbg("%s", "Entering .......... "); - - if (!tty) { - dbg("%s - no tty available", __func__); - return; - } - - /* if we are implementing XON/XOFF, send the start character */ - if (I_IXOFF(tty)) { - unsigned char start_char = START_CHAR(tty); - status = ATEN2011_write(tty, port, &start_char, 1); - if (status <= 0) - return; - } - - /* if we are implementing RTS/CTS, toggle that line */ - if (tty->termios->c_cflag & CRTSCTS) { - ATEN2011_port->shadowMCR |= MCR_RTS; - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, - ATEN2011_port->shadowMCR); - if (status < 0) - return; - } - - return; -} - -static int ATEN2011_tiocmget(struct tty_struct *tty, struct file *file) -{ - struct usb_serial_port *port = tty->driver_data; - struct ATENINTL_port *ATEN2011_port; - unsigned int result; - __u16 msr; - __u16 mcr; - /* unsigned int mcr; */ - int status = 0; - ATEN2011_port = usb_get_serial_port_data(port); - - dbg("%s - port %d", __func__, port->number); - - if (ATEN2011_port == NULL) - return -ENODEV; - - status = get_uart_reg(port, MODEM_STATUS_REGISTER, &msr); - status = get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr); - /* mcr = ATEN2011_port->shadowMCR; */ - /* COMMENT2: the Fallowing three line are commented for updating only MSR values */ - result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) - | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) - | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0) - | ((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) - | ((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) - | ((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) - | ((msr & ATEN2011_MSR_DSR) ? 
TIOCM_DSR : 0); - - dbg("%s - 0x%04X", __func__, result); - - return result; -} - -static int ATEN2011_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear) -{ - struct usb_serial_port *port = tty->driver_data; - struct ATENINTL_port *ATEN2011_port; - unsigned int mcr; - unsigned int status; - - dbg("%s - port %d", __func__, port->number); - - ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return -ENODEV; - - mcr = ATEN2011_port->shadowMCR; - if (clear & TIOCM_RTS) - mcr &= ~MCR_RTS; - if (clear & TIOCM_DTR) - mcr &= ~MCR_DTR; - if (clear & TIOCM_LOOP) - mcr &= ~MCR_LOOPBACK; - - if (set & TIOCM_RTS) - mcr |= MCR_RTS; - if (set & TIOCM_DTR) - mcr |= MCR_DTR; - if (set & TIOCM_LOOP) - mcr |= MCR_LOOPBACK; - - ATEN2011_port->shadowMCR = mcr; - - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); - if (status < 0) { - dbg("setting MODEM_CONTROL_REGISTER Failed"); - return -1; - } - - return 0; -} - -static void ATEN2011_set_termios(struct tty_struct *tty, - struct usb_serial_port *port, - struct ktermios *old_termios) -{ - int status; - unsigned int cflag; - struct usb_serial *serial; - struct ATENINTL_port *ATEN2011_port; - - dbg("ATEN2011_set_termios: START"); - - serial = port->serial; - - ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return; - - if (!ATEN2011_port->open) { - dbg("%s - port not opened", __func__); - return; - } - - dbg("%s", "setting termios - "); - - cflag = tty->termios->c_cflag; - - dbg("%s - cflag %08x iflag %08x", __func__, - tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); - - if (old_termios) { - dbg("%s - old clfag %08x old iflag %08x", __func__, - old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); - } - - dbg("%s - port %d", __func__, port->number); - - /* change the port settings to the new ones specified */ - - ATEN2011_change_port_settings(tty, ATEN2011_port, old_termios); - - if (!ATEN2011_port->read_urb) { - dbg("%s", "URB KILLED !!!!!"); - return; - } - - if (ATEN2011_port->read_urb->status != -EINPROGRESS) { - ATEN2011_port->read_urb->dev = serial->dev; - status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); - if (status) { - dbg - (" usb_submit_urb(read bulk) failed, status = %d", - status); - } - } - return; -} - -static int get_lsr_info(struct tty_struct *tty, - struct ATENINTL_port *ATEN2011_port, - unsigned int __user *value) -{ - int count; - unsigned int result = 0; - - count = ATEN2011_chars_in_buffer(tty); - if (count == 0) { - dbg("%s -- Empty", __func__); - result = TIOCSER_TEMT; - } - - if (copy_to_user(value, &result, sizeof(int))) - return -EFAULT; - return 0; -} - -static int get_number_bytes_avail(struct tty_struct *tty, - struct ATENINTL_port *ATEN2011_port, - unsigned int __user *value) -{ - unsigned int result = 0; - - if (!tty) - return -ENOIOCTLCMD; - - result = tty->read_cnt; - - dbg("%s(%d) = %d", __func__, ATEN2011_port->port->number, result); - if (copy_to_user(value, &result, sizeof(int))) - return -EFAULT; - - return -ENOIOCTLCMD; -} - -static int set_modem_info(struct ATENINTL_port *ATEN2011_port, unsigned int cmd, - unsigned int __user *value) -{ - unsigned int mcr; - unsigned int arg; - __u16 Data; - int status; - struct usb_serial_port *port; - - if (ATEN2011_port == NULL) - return -1; - - port = (struct usb_serial_port *)ATEN2011_port->port; - - mcr = ATEN2011_port->shadowMCR; - - if (copy_from_user(&arg, value, sizeof(int))) - return -EFAULT; - - switch (cmd) { - case TIOCMBIS: - 
if (arg & TIOCM_RTS) - mcr |= MCR_RTS; - if (arg & TIOCM_DTR) - mcr |= MCR_RTS; - if (arg & TIOCM_LOOP) - mcr |= MCR_LOOPBACK; - break; - - case TIOCMBIC: - if (arg & TIOCM_RTS) - mcr &= ~MCR_RTS; - if (arg & TIOCM_DTR) - mcr &= ~MCR_RTS; - if (arg & TIOCM_LOOP) - mcr &= ~MCR_LOOPBACK; - break; - - case TIOCMSET: - /* turn off the RTS and DTR and LOOPBACK - * and then only turn on what was asked to */ - mcr &= ~(MCR_RTS | MCR_DTR | MCR_LOOPBACK); - mcr |= ((arg & TIOCM_RTS) ? MCR_RTS : 0); - mcr |= ((arg & TIOCM_DTR) ? MCR_DTR : 0); - mcr |= ((arg & TIOCM_LOOP) ? MCR_LOOPBACK : 0); - break; - } - - ATEN2011_port->shadowMCR = mcr; - - Data = ATEN2011_port->shadowMCR; - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - if (status < 0) { - dbg("setting MODEM_CONTROL_REGISTER Failed"); - return -1; - } - - return 0; -} - -static int get_modem_info(struct ATENINTL_port *ATEN2011_port, - unsigned int __user *value) -{ - unsigned int result = 0; - __u16 msr; - unsigned int mcr = ATEN2011_port->shadowMCR; - int status; - - status = get_uart_reg(ATEN2011_port->port, MODEM_STATUS_REGISTER, &msr); - result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */ - |((mcr & MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */ - |((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */ - |((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) /* 0x040 */ - |((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */ - |((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */ - - dbg("%s -- %x", __func__, result); - - if (copy_to_user(value, &result, sizeof(int))) - return -EFAULT; - return 0; -} - -static int get_serial_info(struct ATENINTL_port *ATEN2011_port, - struct serial_struct __user *retinfo) -{ - struct serial_struct tmp; - - if (ATEN2011_port == NULL) - return -1; - - if (!retinfo) - return -EFAULT; - - memset(&tmp, 0, sizeof(tmp)); - - tmp.type = PORT_16550A; - tmp.line = ATEN2011_port->port->serial->minor; - if (tmp.line == SERIAL_TTY_NO_MINOR) - tmp.line = 0; - tmp.port = ATEN2011_port->port->number; - tmp.irq = 0; - tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; - tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE; - tmp.baud_base = 9600; - tmp.close_delay = 5 * HZ; - tmp.closing_wait = 30 * HZ; - - if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) - return -EFAULT; - return 0; -} - -static int ATEN2011_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct usb_serial_port *port = tty->driver_data; - struct ATENINTL_port *ATEN2011_port; - struct async_icount cnow; - struct async_icount cprev; - struct serial_icounter_struct icount; - int ATENret = 0; - unsigned int __user *user_arg = (unsigned int __user *)arg; - - ATEN2011_port = usb_get_serial_port_data(port); - - if (ATEN2011_port == NULL) - return -1; - - dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); - - switch (cmd) { - /* return number of bytes available */ - - case TIOCINQ: - dbg("%s (%d) TIOCINQ", __func__, port->number); - return get_number_bytes_avail(tty, ATEN2011_port, user_arg); - break; - - case TIOCOUTQ: - dbg("%s (%d) TIOCOUTQ", __func__, port->number); - return put_user(ATEN2011_chars_in_buffer(tty), user_arg); - break; - - case TIOCSERGETLSR: - dbg("%s (%d) TIOCSERGETLSR", __func__, port->number); - return get_lsr_info(tty, ATEN2011_port, user_arg); - return 0; - - case TIOCMBIS: - case TIOCMBIC: - case TIOCMSET: - dbg("%s (%d) TIOCMSET/TIOCMBIC/TIOCMSET", __func__, - port->number); - ATENret = set_modem_info(ATEN2011_port, cmd, user_arg); - return ATENret; - - case TIOCMGET: - dbg("%s (%d) 
TIOCMGET", __func__, port->number); - return get_modem_info(ATEN2011_port, user_arg); - - case TIOCGSERIAL: - dbg("%s (%d) TIOCGSERIAL", __func__, port->number); - return get_serial_info(ATEN2011_port, - (struct serial_struct __user *)arg); - - case TIOCSSERIAL: - dbg("%s (%d) TIOCSSERIAL", __func__, port->number); - break; - - case TIOCMIWAIT: - dbg("%s (%d) TIOCMIWAIT", __func__, port->number); - cprev = ATEN2011_port->icount; - while (1) { - /* see if a signal did it */ - if (signal_pending(current)) - return -ERESTARTSYS; - cnow = ATEN2011_port->icount; - if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && - cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) - return -EIO; /* no change => error */ - if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || - ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || - ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || - ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { - return 0; - } - cprev = cnow; - } - /* NOTREACHED */ - break; - - case TIOCGICOUNT: - cnow = ATEN2011_port->icount; - icount.cts = cnow.cts; - icount.dsr = cnow.dsr; - icount.rng = cnow.rng; - icount.dcd = cnow.dcd; - icount.rx = cnow.rx; - icount.tx = cnow.tx; - icount.frame = cnow.frame; - icount.overrun = cnow.overrun; - icount.parity = cnow.parity; - icount.brk = cnow.brk; - icount.buf_overrun = cnow.buf_overrun; - - dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__, - port->number, icount.rx, icount.tx); - if (copy_to_user((void __user *)arg, &icount, sizeof(icount))) - return -EFAULT; - return 0; - - default: - break; - } - - return -ENOIOCTLCMD; -} - -static int ATEN2011_calc_baud_rate_divisor(int baudRate, int *divisor, - __u16 *clk_sel_val) -{ - dbg("%s - %d", __func__, baudRate); - - if (baudRate <= 115200) { - *divisor = 115200 / baudRate; - *clk_sel_val = 0x0; - } - if ((baudRate > 115200) && (baudRate <= 230400)) { - *divisor = 230400 / baudRate; - *clk_sel_val = 0x10; - } else if ((baudRate > 230400) && (baudRate <= 403200)) { - *divisor = 403200 / baudRate; - *clk_sel_val = 0x20; - } else if ((baudRate > 403200) && (baudRate <= 460800)) { - *divisor = 460800 / baudRate; - *clk_sel_val = 0x30; - } else if ((baudRate > 460800) && (baudRate <= 806400)) { - *divisor = 806400 / baudRate; - *clk_sel_val = 0x40; - } else if ((baudRate > 806400) && (baudRate <= 921600)) { - *divisor = 921600 / baudRate; - *clk_sel_val = 0x50; - } else if ((baudRate > 921600) && (baudRate <= 1572864)) { - *divisor = 1572864 / baudRate; - *clk_sel_val = 0x60; - } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { - *divisor = 3145728 / baudRate; - *clk_sel_val = 0x70; - } - return 0; -} - -static int ATEN2011_send_cmd_write_baud_rate(struct ATENINTL_port - *ATEN2011_port, int baudRate) -{ - int divisor = 0; - int status; - __u16 Data; - unsigned char number; - __u16 clk_sel_val; - struct usb_serial_port *port; - int minor; - - if (ATEN2011_port == NULL) - return -1; - - port = (struct usb_serial_port *)ATEN2011_port->port; - - dbg("%s", "Entering .......... 
"); - - minor = ATEN2011_port->port->serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - number = ATEN2011_port->port->number - minor; - - dbg("%s - port = %d, baud = %d", __func__, - ATEN2011_port->port->number, baudRate); - /* reset clk_uart_sel in spregOffset */ - if (baudRate > 115200) { -#ifdef HW_flow_control - /* - * NOTE: need to see the pther register to modify - * setting h/w flow control bit to 1; - */ - /* Data = ATEN2011_port->shadowMCR; */ - Data = 0x2b; - ATEN2011_port->shadowMCR = Data; - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - if (status < 0) { - dbg("Writing spreg failed in set_serial_baud"); - return -1; - } -#endif - - } else { -#ifdef HW_flow_control - /* setting h/w flow control bit to 0; */ - /* Data = ATEN2011_port->shadowMCR; */ - Data = 0xb; - ATEN2011_port->shadowMCR = Data; - status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - if (status < 0) { - dbg("Writing spreg failed in set_serial_baud"); - return -1; - } -#endif - - } - - if (1) /* baudRate <= 115200) */ { - clk_sel_val = 0x0; - Data = 0x0; - status = - ATEN2011_calc_baud_rate_divisor(baudRate, &divisor, - &clk_sel_val); - status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); - if (status < 0) { - dbg("reading spreg failed in set_serial_baud"); - return -1; - } - Data = (Data & 0x8f) | clk_sel_val; - status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); - if (status < 0) { - dbg("Writing spreg failed in set_serial_baud"); - return -1; - } - /* Calculate the Divisor */ - - if (status) { - err("%s - bad baud rate", __func__); - dbg("%s", "bad baud rate"); - return status; - } - /* Enable access to divisor latch */ - Data = ATEN2011_port->shadowLCR | SERIAL_LCR_DLAB; - ATEN2011_port->shadowLCR = Data; - set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - - /* Write the divisor */ - Data = (unsigned char)(divisor & 0xff); - dbg("set_serial_baud Value to write DLL is %x", Data); - set_uart_reg(port, DIVISOR_LATCH_LSB, Data); - - Data = (unsigned char)((divisor & 0xff00) >> 8); - dbg("set_serial_baud Value to write DLM is %x", Data); - set_uart_reg(port, DIVISOR_LATCH_MSB, Data); - - /* Disable access to divisor latch */ - Data = ATEN2011_port->shadowLCR & ~SERIAL_LCR_DLAB; - ATEN2011_port->shadowLCR = Data; - set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - - } - - return status; -} - -static void ATEN2011_change_port_settings(struct tty_struct *tty, - struct ATENINTL_port *ATEN2011_port, - struct ktermios *old_termios) -{ - int baud; - unsigned cflag; - unsigned iflag; - __u8 lData; - __u8 lParity; - __u8 lStop; - int status; - __u16 Data; - struct usb_serial_port *port; - struct usb_serial *serial; - - if (ATEN2011_port == NULL) - return; - - port = (struct usb_serial_port *)ATEN2011_port->port; - - serial = port->serial; - - dbg("%s - port %d", __func__, ATEN2011_port->port->number); - - if (!ATEN2011_port->open) { - dbg("%s - port not opened", __func__); - return; - } - - if ((!tty) || (!tty->termios)) { - dbg("%s - no tty structures", __func__); - return; - } - - dbg("%s", "Entering .......... 
"); - - lData = LCR_BITS_8; - lStop = LCR_STOP_1; - lParity = LCR_PAR_NONE; - - cflag = tty->termios->c_cflag; - iflag = tty->termios->c_iflag; - - /* Change the number of bits */ - - /* COMMENT1: the below Line"if(cflag & CSIZE)" is added for the errors we get for serial loop data test i.e serial_loopback.pl -v */ - /* if(cflag & CSIZE) */ - { - switch (cflag & CSIZE) { - case CS5: - lData = LCR_BITS_5; - break; - - case CS6: - lData = LCR_BITS_6; - break; - - case CS7: - lData = LCR_BITS_7; - break; - default: - case CS8: - lData = LCR_BITS_8; - break; - } - } - /* Change the Parity bit */ - if (cflag & PARENB) { - if (cflag & PARODD) { - lParity = LCR_PAR_ODD; - dbg("%s - parity = odd", __func__); - } else { - lParity = LCR_PAR_EVEN; - dbg("%s - parity = even", __func__); - } - - } else { - dbg("%s - parity = none", __func__); - } - - if (cflag & CMSPAR) - lParity = lParity | 0x20; - - /* Change the Stop bit */ - if (cflag & CSTOPB) { - lStop = LCR_STOP_2; - dbg("%s - stop bits = 2", __func__); - } else { - lStop = LCR_STOP_1; - dbg("%s - stop bits = 1", __func__); - } - - /* Update the LCR with the correct value */ - ATEN2011_port->shadowLCR &= - ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); - ATEN2011_port->shadowLCR |= (lData | lParity | lStop); - - dbg - ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is %x", - ATEN2011_port->shadowLCR); - /* Disable Interrupts */ - Data = 0x00; - set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); - - Data = 0x00; - set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); - - Data = 0xcf; - set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); - - /* Send the updated LCR value to the ATEN2011 */ - Data = ATEN2011_port->shadowLCR; - - set_uart_reg(port, LINE_CONTROL_REGISTER, Data); - - Data = 0x00b; - ATEN2011_port->shadowMCR = Data; - set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - Data = 0x00b; - set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - - /* set up the MCR register and send it to the ATEN2011 */ - - ATEN2011_port->shadowMCR = MCR_MASTER_IE; - if (cflag & CBAUD) - ATEN2011_port->shadowMCR |= (MCR_DTR | MCR_RTS); - - if (cflag & CRTSCTS) - ATEN2011_port->shadowMCR |= (MCR_XON_ANY); - else - ATEN2011_port->shadowMCR &= ~(MCR_XON_ANY); - - Data = ATEN2011_port->shadowMCR; - set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); - - /* Determine divisor based on baud rate */ - baud = tty_get_baud_rate(tty); - - if (!baud) { - /* pick a default, any default... */ - dbg("%s", "Picked default baud..."); - baud = 9600; - } - - dbg("%s - baud rate = %d", __func__, baud); - status = ATEN2011_send_cmd_write_baud_rate(ATEN2011_port, baud); - - /* Enable Interrupts */ - Data = 0x0c; - set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); - - if (ATEN2011_port->read_urb->status != -EINPROGRESS) { - ATEN2011_port->read_urb->dev = serial->dev; - - status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); - - if (status) { - dbg - (" usb_submit_urb(read bulk) failed, status = %d", - status); - } - } - dbg - ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is End %x", - ATEN2011_port->shadowLCR); - - return; -} - -static int ATEN2011_calc_num_ports(struct usb_serial *serial) -{ - - __u16 Data = 0x00; - int ret = 0; - int ATEN2011_2or4ports; - ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), - ATEN_RDREQ, ATEN_RD_RTYPE, 0, GPIO_REGISTER, - &Data, VENDOR_READ_LENGTH, ATEN_WDR_TIMEOUT); - -/* ghostgum: here is where the problem appears to bet */ -/* Which of the following are needed? 
*/ -/* Greg used the serial->type->num_ports=2 */ -/* But the code in the ATEN2011_open relies on serial->num_ports=2 */ - if ((Data & 0x01) == 0) { - ATEN2011_2or4ports = 2; - serial->type->num_ports = 2; - serial->num_ports = 2; - } - /* else if(serial->interface->cur_altsetting->desc.bNumEndpoints == 9) */ - else { - ATEN2011_2or4ports = 4; - serial->type->num_ports = 4; - serial->num_ports = 4; - - } - - return ATEN2011_2or4ports; -} - -static int ATEN2011_startup(struct usb_serial *serial) -{ - struct ATENINTL_serial *ATEN2011_serial; - struct ATENINTL_port *ATEN2011_port; - struct usb_device *dev; - int i, status; - int minor; - - __u16 Data; - dbg("%s", " ATEN2011_startup :entering.........."); - - if (!serial) { - dbg("%s", "Invalid Handler"); - return -1; - } - - dev = serial->dev; - - dbg("%s", "Entering..."); - - /* create our private serial structure */ - ATEN2011_serial = kzalloc(sizeof(struct ATENINTL_serial), GFP_KERNEL); - if (ATEN2011_serial == NULL) { - err("%s - Out of memory", __func__); - return -ENOMEM; - } - - /* resetting the private structure field values to zero */ - memset(ATEN2011_serial, 0, sizeof(struct ATENINTL_serial)); - - ATEN2011_serial->serial = serial; - /* initilize status polling flag to 0 */ - ATEN2011_serial->status_polling_started = 0; - - usb_set_serial_data(serial, ATEN2011_serial); - ATEN2011_serial->ATEN2011_spectrum_2or4ports = - ATEN2011_calc_num_ports(serial); - /* we set up the pointers to the endpoints in the ATEN2011_open * - * function, as the structures aren't created yet. */ - - /* set up port private structures */ - for (i = 0; i < serial->num_ports; ++i) { - ATEN2011_port = - kmalloc(sizeof(struct ATENINTL_port), GFP_KERNEL); - if (ATEN2011_port == NULL) { - err("%s - Out of memory", __func__); - usb_set_serial_data(serial, NULL); - kfree(ATEN2011_serial); - return -ENOMEM; - } - memset(ATEN2011_port, 0, sizeof(struct ATENINTL_port)); - - /* - * Initialize all port interrupt end point to port 0 - * int endpoint. 
Our device has only one interrupt end point - * comman to all port - */ - /* serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; */ - - ATEN2011_port->port = serial->port[i]; - usb_set_serial_port_data(serial->port[i], ATEN2011_port); - - minor = serial->port[i]->serial->minor; - if (minor == SERIAL_TTY_NO_MINOR) - minor = 0; - ATEN2011_port->port_num = - ((serial->port[i]->number - minor) + 1); - - if (ATEN2011_port->port_num == 1) { - ATEN2011_port->SpRegOffset = 0x0; - ATEN2011_port->ControlRegOffset = 0x1; - ATEN2011_port->DcrRegOffset = 0x4; - } else if ((ATEN2011_port->port_num == 2) - && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == - 4)) { - ATEN2011_port->SpRegOffset = 0x8; - ATEN2011_port->ControlRegOffset = 0x9; - ATEN2011_port->DcrRegOffset = 0x16; - } else if ((ATEN2011_port->port_num == 2) - && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == - 2)) { - ATEN2011_port->SpRegOffset = 0xa; - ATEN2011_port->ControlRegOffset = 0xb; - ATEN2011_port->DcrRegOffset = 0x19; - } else if ((ATEN2011_port->port_num == 3) - && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == - 4)) { - ATEN2011_port->SpRegOffset = 0xa; - ATEN2011_port->ControlRegOffset = 0xb; - ATEN2011_port->DcrRegOffset = 0x19; - } else if ((ATEN2011_port->port_num == 4) - && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == - 4)) { - ATEN2011_port->SpRegOffset = 0xc; - ATEN2011_port->ControlRegOffset = 0xd; - ATEN2011_port->DcrRegOffset = 0x1c; - } - - usb_set_serial_port_data(serial->port[i], ATEN2011_port); - - /* enable rx_disable bit in control register */ - - status = get_reg_sync(serial->port[i], - ATEN2011_port->ControlRegOffset, &Data); - if (status < 0) { - dbg("Reading ControlReg failed status-0x%x", - status); - break; - } else - dbg - ("ControlReg Reading success val is %x, status%d", - Data, status); - Data |= 0x08; /* setting driver done bit */ - Data |= 0x04; /* sp1_bit to have cts change reflect in modem status reg */ - - /* Data |= 0x20; */ /* rx_disable bit */ - status = set_reg_sync(serial->port[i], - ATEN2011_port->ControlRegOffset, Data); - if (status < 0) { - dbg - ("Writing ControlReg failed(rx_disable) status-0x%x", - status); - break; - } else - dbg - ("ControlReg Writing success(rx_disable) status%d", - status); - - /* - * Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 - * and 0x24 in DCR3 - */ - Data = 0x01; - status = set_reg_sync(serial->port[i], - (__u16)(ATEN2011_port->DcrRegOffset + 0), - Data); - if (status < 0) { - dbg("Writing DCR0 failed status-0x%x", status); - break; - } else - dbg("DCR0 Writing success status%d", status); - - Data = 0x05; - status = set_reg_sync(serial->port[i], - (__u16)(ATEN2011_port->DcrRegOffset + 1), - Data); - if (status < 0) { - dbg("Writing DCR1 failed status-0x%x", status); - break; - } else - dbg("DCR1 Writing success status%d", status); - - Data = 0x24; - status = set_reg_sync(serial->port[i], - (__u16)(ATEN2011_port->DcrRegOffset + 2), - Data); - if (status < 0) { - dbg("Writing DCR2 failed status-0x%x", status); - break; - } else - dbg("DCR2 Writing success status%d", status); - - /* write values in clkstart0x0 and clkmulti 0x20 */ - Data = 0x0; - status = set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER, - Data); - if (status < 0) { - dbg - ("Writing CLK_START_VALUE_REGISTER failed status-0x%x", - status); - break; - } else - dbg - ("CLK_START_VALUE_REGISTER Writing success status%d", - status); - - Data = 0x20; - status = set_reg_sync(serial->port[i], CLK_MULTI_REGISTER, - Data); - if 
(status < 0) { - dbg - ("Writing CLK_MULTI_REGISTER failed status-0x%x", - status); - break; - } else - dbg("CLK_MULTI_REGISTER Writing success status%d", - status); - - /* Zero Length flag register */ - if ((ATEN2011_port->port_num != 1) - && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)) { - - Data = 0xff; - status = set_reg_sync(serial->port[i], - (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num)), - Data); - dbg("ZLIP offset%x", - (__u16) (ZLP_REG1 + - ((__u16) ATEN2011_port->port_num))); - if (status < 0) { - dbg - ("Writing ZLP_REG%d failed status-0x%x", - i + 2, status); - break; - } else - dbg("ZLP_REG%d Writing success status%d", - i + 2, status); - } else { - Data = 0xff; - status = set_reg_sync(serial->port[i], - (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num) - 0x1), - Data); - dbg("ZLIP offset%x", - (__u16) (ZLP_REG1 + - ((__u16) ATEN2011_port->port_num) - - 0x1)); - if (status < 0) { - dbg - ("Writing ZLP_REG%d failed status-0x%x", - i + 1, status); - break; - } else - dbg("ZLP_REG%d Writing success status%d", - i + 1, status); - - } - ATEN2011_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC); - ATEN2011_port->ctrl_buf = kmalloc(16, GFP_KERNEL); - - } - - /* Zero Length flag enable */ - Data = 0x0f; - status = set_reg_sync(serial->port[0], ZLP_REG5, Data); - if (status < 0) { - dbg("Writing ZLP_REG5 failed status-0x%x", status); - return -1; - } else - dbg("ZLP_REG5 Writing success status%d", status); - - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); - return 0; -} - -static void ATEN2011_release(struct usb_serial *serial) -{ - int i; - struct ATENINTL_port *ATEN2011_port; - - /* check for the ports to be closed,close the ports and disconnect */ - - /* free private structure allocated for serial port * - * stop reads and writes on all ports */ - - for (i = 0; i < serial->num_ports; ++i) { - ATEN2011_port = usb_get_serial_port_data(serial->port[i]); - kfree(ATEN2011_port->ctrl_buf); - usb_kill_urb(ATEN2011_port->control_urb); - kfree(ATEN2011_port); - usb_set_serial_port_data(serial->port[i], NULL); - } - - /* free private structure allocated for serial device */ - - kfree(usb_get_serial_data(serial)); - usb_set_serial_data(serial, NULL); -} - -static struct usb_serial_driver aten_serial_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "aten2011", - }, - .description = DRIVER_DESC, - .id_table = id_table, - .open = ATEN2011_open, - .close = ATEN2011_close, - .write = ATEN2011_write, - .write_room = ATEN2011_write_room, - .chars_in_buffer = ATEN2011_chars_in_buffer, - .throttle = ATEN2011_throttle, - .unthrottle = ATEN2011_unthrottle, - .calc_num_ports = ATEN2011_calc_num_ports, - - .ioctl = ATEN2011_ioctl, - .set_termios = ATEN2011_set_termios, - .break_ctl = ATEN2011_break, - .tiocmget = ATEN2011_tiocmget, - .tiocmset = ATEN2011_tiocmset, - .attach = ATEN2011_startup, - .release = ATEN2011_release, - .read_bulk_callback = ATEN2011_bulk_in_callback, - .read_int_callback = ATEN2011_interrupt_callback, -}; - -static struct usb_driver aten_driver = { - .name = "aten2011", - .probe = usb_serial_probe, - .disconnect = usb_serial_disconnect, - .id_table = id_table, -}; - -static int __init aten_init(void) -{ - int retval; - - /* Register with the usb serial */ - retval = usb_serial_register(&aten_serial_driver); - if (retval) - return retval; - - printk(KERN_INFO KBUILD_MODNAME ":" - DRIVER_DESC " " DRIVER_VERSION "\n"); - - /* Register with the 
usb */ - retval = usb_register(&aten_driver); - if (retval) - usb_serial_deregister(&aten_serial_driver); - - return retval; -} - -static void __exit aten_exit(void) -{ - usb_deregister(&aten_driver); - usb_serial_deregister(&aten_serial_driver); -} - -module_init(aten_init); -module_exit(aten_exit); - -/* Module information */ -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); - -MODULE_PARM_DESC(debug, "Debug enabled or not"); diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index 0ab9d15f343..f5416af1e90 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c @@ -21,6 +21,7 @@ #include <linux/mm.h> #include <linux/fb.h> #include <linux/mutex.h> +#include <linux/vmalloc.h> #include "udlfb.h" diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 22f93dd0ba0..251220dc885 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -18,6 +18,7 @@ */ #include <linux/kernel.h> +#include <linux/smp_lock.h> #include <linux/file.h> #include <linux/tcp.h> #include <linux/in.h> diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index a10ed27acbc..f43ca416e4a 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -344,7 +344,7 @@ static CHIP_INFO chip_info_table[]= { }; static struct pci_device_id device_id_table[] __devinitdata = { -{ 0x1106, 0x3253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (int)&chip_info_table[0]}, +{ 0x1106, 0x3253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long)&chip_info_table[0]}, { 0, } }; #endif @@ -369,7 +369,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); #ifdef CONFIG_PM static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr); -static int viawget_suspend(struct pci_dev *pcid, u32 state); +static int viawget_suspend(struct pci_dev *pcid, pm_message_t state); static int viawget_resume(struct pci_dev *pcid); struct notifier_block device_notifier = { notifier_call: device_notify_reboot, @@ -3941,7 +3941,7 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p) while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) { if(pci_dev_driver(pdev) == &device_driver) { if (pci_get_drvdata(pdev)) - viawget_suspend(pdev, 3); + viawget_suspend(pdev, PMSG_HIBERNATE); } } } @@ -3949,7 +3949,7 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p) } static int -viawget_suspend(struct pci_dev *pcid, u32 state) +viawget_suspend(struct pci_dev *pcid, pm_message_t state) { int power_status; // to silence the compiler @@ -3971,7 +3971,7 @@ viawget_suspend(struct pci_dev *pcid, u32 state) memset(pMgmt->abyCurrBSSID, 0, 6); pMgmt->eCurrState = WMAC_STATE_IDLE; pci_disable_device(pcid); - power_status = pci_set_power_state(pcid, state); + power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state)); spin_unlock_irq(&pDevice->lock); return 0; } diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index a913efc6966..40de151f278 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -257,6 +257,7 @@ #include <linux/fs.h> /* everything... 
*/ #include <linux/errno.h> /* error codes */ #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/interrupt.h> diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c index b52cc830c0b..f3873f650bb 100644 --- a/drivers/telephony/phonedev.c +++ b/drivers/telephony/phonedev.c @@ -23,7 +23,6 @@ #include <linux/errno.h> #include <linux/phonedev.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/system.h> diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 0a69672097a..4e83c297ec9 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) mutex_lock(&tz->lock); - tz->ops->get_temp(tz, &temp); + if (tz->ops->get_temp(tz, &temp)) { + /* get_temp failed - retry it later */ + printk(KERN_WARNING PREFIX "failed to read out thermal zone " + "%d\n", tz->id); + goto leave; + } for (count = 0; count < tz->trips; count++) { tz->ops->get_trip_type(tz, count, &trip_type); @@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) THERMAL_TRIPS_NONE); tz->last_temperature = temp; + + leave: if (tz->passive) thermal_zone_device_set_polling(tz, tz->passive_delay); else if (tz->polling_delay) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 38bfdb0f666..2bfc41ece0e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -462,11 +462,18 @@ urbs: rcv->buffer = buf; - usb_fill_bulk_urb(rcv->urb, acm->dev, - acm->rx_endpoint, - buf->base, - acm->readsize, - acm_read_bulk, rcv); + if (acm->is_int_ep) + usb_fill_int_urb(rcv->urb, acm->dev, + acm->rx_endpoint, + buf->base, + acm->readsize, + acm_read_bulk, rcv, acm->bInterval); + else + usb_fill_bulk_urb(rcv->urb, acm->dev, + acm->rx_endpoint, + buf->base, + acm->readsize, + acm_read_bulk, rcv); rcv->urb->transfer_dma = buf->dma; rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -550,7 +557,7 @@ static void acm_waker(struct work_struct *waker) static int acm_tty_open(struct tty_struct *tty, struct file *filp) { struct acm *acm; - int rv = -EINVAL; + int rv = -ENODEV; int i; dbg("Entering acm_tty_open."); @@ -677,7 +684,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) /* Perform the closing process and see if we need to do the hardware shutdown */ - if (tty_port_close_start(&acm->port, tty, filp) == 0) + if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0) return; acm_port_down(acm, 0); tty_port_close_end(&acm->port, tty); @@ -740,7 +747,7 @@ static int acm_tty_chars_in_buffer(struct tty_struct *tty) { struct acm *acm = tty->driver_data; if (!ACM_READY(acm)) - return -EINVAL; + return 0; /* * This is inaccurate (overcounts), but it works. 
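Editor's note on the cdc-acm hunks above: they teach the driver to cope with devices that, contrary to the CDC spec, expose interrupt endpoints where bulk endpoints are expected, by checking the endpoint type before filling the read/write URBs. Below is a minimal sketch of that selection pattern using only standard USB core helpers (usb_endpoint_xfer_int, usb_fill_int_urb, usb_fill_bulk_urb); the fill_read_urb() wrapper, its parameters and the my_read_complete callback are hypothetical names for illustration, not code from the patch.

#include <linux/usb.h>

static void fill_read_urb(struct urb *urb, struct usb_device *udev,
			  const struct usb_endpoint_descriptor *epread,
			  void *buf, int len,
			  usb_complete_t my_read_complete, void *ctx)
{
	if (usb_endpoint_xfer_int(epread))
		/* interrupt endpoint: the polling interval must be passed along */
		usb_fill_int_urb(urb, udev,
				 usb_rcvintpipe(udev, epread->bEndpointAddress),
				 buf, len, my_read_complete, ctx,
				 epread->bInterval);
	else
		/* normal case: bulk IN endpoint */
		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, epread->bEndpointAddress),
				  buf, len, my_read_complete, ctx);
}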
*/ @@ -1173,6 +1180,9 @@ made_compressed_probe: spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); + acm->is_int_ep = usb_endpoint_xfer_int(epread); + if (acm->is_int_ep) + acm->bInterval = epread->bInterval; tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; @@ -1227,9 +1237,14 @@ made_compressed_probe: goto alloc_fail7; } - usb_fill_bulk_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), - NULL, acm->writesize, acm_write_bulk, snd); + if (usb_endpoint_xfer_int(epwrite)) + usb_fill_int_urb(snd->urb, usb_dev, + usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); + else + usb_fill_bulk_urb(snd->urb, usb_dev, + usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + NULL, acm->writesize, acm_write_bulk, snd); snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; snd->instance = acm; } diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 1602324808b..c4a0ee8ffcc 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -126,6 +126,8 @@ struct acm { unsigned int ctrl_caps; /* control capabilities from the class specific header */ unsigned int susp_count; /* number of suspended interfaces */ int combined_interfaces:1; /* control and data collapsed */ + int is_int_ep:1; /* interrupt endpoints contrary to spec used */ + u8 bInterval; struct acm_wb *delayed_wb; /* write queued for a device about to be woken */ }; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 0fe434505ac..ba589d4ca8b 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -15,7 +15,6 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/bitops.h> diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 3703789d0d2..b09a527f734 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -751,7 +751,7 @@ static int get_capabilities(struct usbtmc_device_data *data) { struct device *dev = &data->usb_dev->dev; char *buffer; - int rv; + int rv = 0; buffer = kmalloc(0x18, GFP_KERNEL); if (!buffer) @@ -763,7 +763,7 @@ static int get_capabilities(struct usbtmc_device_data *data) 0, 0, buffer, 0x18, USBTMC_TIMEOUT); if (rv < 0) { dev_err(dev, "usb_control_msg returned %d\n", rv); - return rv; + goto err_out; } dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]); @@ -773,7 +773,8 @@ static int get_capabilities(struct usbtmc_device_data *data) dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]); if (buffer[0] != USBTMC_STATUS_SUCCESS) { dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]); - return -EPERM; + rv = -EPERM; + goto err_out; } data->capabilities.interface_capabilities = buffer[4]; @@ -781,8 +782,9 @@ static int get_capabilities(struct usbtmc_device_data *data) data->capabilities.usb488_interface_capabilities = buffer[14]; data->capabilities.usb488_device_capabilities = buffer[15]; +err_out: kfree(buffer); - return 0; + return rv; } #define capability_attribute(name) \ diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 69280c35b5c..ad925946f86 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -28,7 +28,7 @@ comment "Miscellaneous USB options" depends on USB config USB_DEVICEFS - bool "USB device filesystem (DEPRECATED)" if EMBEDDED + bool "USB 
device filesystem (DEPRECATED)" depends on USB ---help--- If you say Y here (and to "/proc file system support" in the "File diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 24dfb33f90c..a16c538d013 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, int max_tx; int i; - /* Allocate space for the SS endpoint companion descriptor */ - ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), - GFP_KERNEL); - if (!ep->ss_ep_comp) - return -ENOMEM; desc = (struct usb_ss_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); - ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; - ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; - ep->ss_ep_comp->desc.bMaxBurst = 0; - /* - * Leave bmAttributes as zero, which will mean no streams for - * bulk, and isoc won't support multiple bursts of packets. - * With bursts of only one packet, and a Mult of 1, the max - * amount of data moved per endpoint service interval is one - * packet. - */ - if (usb_endpoint_xfer_isoc(&ep->desc) || - usb_endpoint_xfer_int(&ep->desc)) - ep->ss_ep_comp->desc.wBytesPerInterval = - ep->desc.wMaxPacketSize; /* * The next descriptor is for an Endpoint or Interface, * no extra descriptors to copy into the companion structure, * and we didn't eat up any of the buffer. */ - retval = 0; - goto valid; + return 0; } memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); desc = &ep->ss_ep_comp->desc; @@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, buffer += i; size -= i; + /* Allocate space for the SS endpoint companion descriptor */ + endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), + GFP_KERNEL); + if (!endpoint->ss_ep_comp) + return -ENOMEM; + + /* Fill in some default values (may be overwritten later) */ + endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; + endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; + endpoint->ss_ep_comp->desc.bMaxBurst = 0; + /* + * Leave bmAttributes as zero, which will mean no streams for + * bulk, and isoc won't support multiple bursts of packets. + * With bursts of only one packet, and a Mult of 1, the max + * amount of data moved per endpoint service interval is one + * packet. 
+ */ + if (usb_endpoint_xfer_isoc(&endpoint->desc) || + usb_endpoint_xfer_int(&endpoint->desc)) + endpoint->ss_ep_comp->desc.wBytesPerInterval = + endpoint->desc.wMaxPacketSize; + if (size > 0) { retval = usb_parse_ss_endpoint_companion(ddev, cfgno, inum, asnum, endpoint, num_ep, buffer, @@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, retval = buffer - buffer0; } } else { + dev_warn(ddev, "config %d interface %d altsetting %d " + "endpoint 0x%X has no " + "SuperSpeed companion descriptor\n", + cfgno, inum, asnum, d->bEndpointAddress); retval = buffer - buffer0; } } else { diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 73c108d117b..96f11715cd2 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -136,17 +136,19 @@ static const struct class_info clas_info[] = {USB_CLASS_AUDIO, "audio"}, {USB_CLASS_COMM, "comm."}, {USB_CLASS_HID, "HID"}, - {USB_CLASS_HUB, "hub"}, {USB_CLASS_PHYSICAL, "PID"}, + {USB_CLASS_STILL_IMAGE, "still"}, {USB_CLASS_PRINTER, "print"}, {USB_CLASS_MASS_STORAGE, "stor."}, + {USB_CLASS_HUB, "hub"}, {USB_CLASS_CDC_DATA, "data"}, - {USB_CLASS_APP_SPEC, "app."}, - {USB_CLASS_VENDOR_SPEC, "vend."}, - {USB_CLASS_STILL_IMAGE, "still"}, {USB_CLASS_CSCID, "scard"}, {USB_CLASS_CONTENT_SEC, "c-sec"}, {USB_CLASS_VIDEO, "video"}, + {USB_CLASS_WIRELESS_CONTROLLER, "wlcon"}, + {USB_CLASS_MISC, "misc"}, + {USB_CLASS_APP_SPEC, "app."}, + {USB_CLASS_VENDOR_SPEC, "vend."}, {-1, "unk."} /* leave as last */ }; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 308609039c7..4247eccf858 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -325,21 +325,34 @@ static void async_completed(struct urb *urb) struct async *as = urb->context; struct dev_state *ps = as->ps; struct siginfo sinfo; + struct pid *pid = NULL; + uid_t uid = 0; + uid_t euid = 0; + u32 secid = 0; + int signr; spin_lock(&ps->lock); list_move_tail(&as->asynclist, &ps->async_completed); - spin_unlock(&ps->lock); as->status = urb->status; - if (as->signr) { + signr = as->signr; + if (signr) { sinfo.si_signo = as->signr; sinfo.si_errno = as->status; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = as->userurb; - kill_pid_info_as_uid(as->signr, &sinfo, as->pid, as->uid, - as->euid, as->secid); + pid = as->pid; + uid = as->uid; + euid = as->euid; + secid = as->secid; } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb, as->userurb); + spin_unlock(&ps->lock); + + if (signr) + kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid, + euid, secid); + wake_up(&ps->wait); } @@ -582,7 +595,7 @@ static int usbdev_open(struct inode *inode, struct file *file) if (!ps) goto out; - ret = -ENOENT; + ret = -ENODEV; /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) @@ -982,7 +995,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT)) return -EINVAL; - if (!uurb->buffer) + if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { @@ -1038,11 +1051,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, is_in = 0; uurb->endpoint &= ~USB_DIR_IN; } - if (!access_ok(is_in ? 
VERIFY_WRITE : VERIFY_READ, - uurb->buffer, uurb->buffer_length)) { - kfree(dr); - return -EFAULT; - } snoop(&ps->dev->dev, "control urb: bRequest=%02x " "bRrequestType=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", @@ -1062,9 +1070,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, uurb->number_of_packets = 0; if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) return -EINVAL; - if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, - uurb->buffer, uurb->buffer_length)) - return -EFAULT; snoop(&ps->dev->dev, "bulk urb\n"); break; @@ -1106,28 +1111,35 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, return -EINVAL; if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) return -EINVAL; - if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, - uurb->buffer, uurb->buffer_length)) - return -EFAULT; snoop(&ps->dev->dev, "interrupt urb\n"); break; default: return -EINVAL; } - as = alloc_async(uurb->number_of_packets); - if (!as) { + if (uurb->buffer_length > 0 && + !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, + uurb->buffer, uurb->buffer_length)) { kfree(isopkt); kfree(dr); - return -ENOMEM; + return -EFAULT; } - as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL); - if (!as->urb->transfer_buffer) { + as = alloc_async(uurb->number_of_packets); + if (!as) { kfree(isopkt); kfree(dr); - free_async(as); return -ENOMEM; } + if (uurb->buffer_length > 0) { + as->urb->transfer_buffer = kmalloc(uurb->buffer_length, + GFP_KERNEL); + if (!as->urb->transfer_buffer) { + kfree(isopkt); + kfree(dr); + free_async(as); + return -ENOMEM; + } + } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | @@ -1169,7 +1181,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, kfree(isopkt); as->ps = ps; as->userurb = arg; - if (uurb->endpoint & USB_DIR_IN) + if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; else as->userbuffer = NULL; @@ -1179,9 +1191,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, as->uid = cred->uid; as->euid = cred->euid; security_task_getsecid(current, &as->secid); - if (!is_in) { + if (!is_in && uurb->buffer_length > 0) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, - as->urb->transfer_buffer_length)) { + uurb->buffer_length)) { free_async(as); return -EFAULT; } @@ -1231,22 +1243,22 @@ static int processcompl(struct async *as, void __user * __user *arg) if (as->userbuffer) if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length)) - return -EFAULT; + goto err_out; if (put_user(as->status, &userurb->status)) - return -EFAULT; + goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) - return -EFAULT; + goto err_out; if (put_user(urb->error_count, &userurb->error_count)) - return -EFAULT; + goto err_out; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) - return -EFAULT; + goto err_out; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) - return -EFAULT; + goto err_out; } } @@ -1255,6 +1267,10 @@ static int processcompl(struct async *as, void __user * __user *arg) if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; + +err_out: + free_async(as); + return -EFAULT; } static struct async *reap_as(struct dev_state *ps) @@ -1305,7 +1321,8 @@ static int 
get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) { __u32 uptr; - if (get_user(kurb->type, &uurb->type) || + if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) || + __get_user(kurb->type, &uurb->type) || __get_user(kurb->endpoint, &uurb->endpoint) || __get_user(kurb->status, &uurb->status) || __get_user(kurb->flags, &uurb->flags) || @@ -1520,8 +1537,9 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg) u32 udata; uioc = compat_ptr((long)arg); - if (get_user(ctrl.ifno, &uioc->ifno) || - get_user(ctrl.ioctl_code, &uioc->ioctl_code) || + if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) || + __get_user(ctrl.ifno, &uioc->ifno) || + __get_user(ctrl.ioctl_code, &uioc->ioctl_code) || __get_user(udata, &uioc->data)) return -EFAULT; ctrl.data = compat_ptr(udata); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ce3f453f02e..95ccfa0b9fc 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -648,7 +648,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) struct urb *urb; int length; unsigned long flags; - char buffer[4]; /* Any root hubs with > 31 ports? */ + char buffer[6]; /* Any root hubs with > 31 ports? */ if (unlikely(!hcd->rh_registered)) return; diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index d397ecfd5b1..ec5c67ea07b 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h @@ -227,6 +227,10 @@ struct hc_driver { /* has a port been handed over to a companion? */ int (*port_handed_over)(struct usb_hcd *, int); + /* CLEAR_TT_BUFFER completion callback */ + void (*clear_tt_buffer_complete)(struct usb_hcd *, + struct usb_host_endpoint *); + /* xHCI specific functions */ /* Called by usb_alloc_dev to alloc HC device structures */ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 2af3b4f0605..71f86c60d83 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -450,10 +450,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) * talking to TTs must queue control transfers (not just bulk and iso), so * both can talk to the same hub concurrently. 
*/ -static void hub_tt_kevent (struct work_struct *work) +static void hub_tt_work(struct work_struct *work) { struct usb_hub *hub = - container_of(work, struct usb_hub, tt.kevent); + container_of(work, struct usb_hub, tt.clear_work); unsigned long flags; int limit = 100; @@ -462,6 +462,7 @@ static void hub_tt_kevent (struct work_struct *work) struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; + const struct hc_driver *drv; int status; next = hub->tt.clear_list.next; @@ -471,21 +472,25 @@ static void hub_tt_kevent (struct work_struct *work) /* drop lock so HCD can concurrently report other TT errors */ spin_unlock_irqrestore (&hub->tt.lock, flags); status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt); - spin_lock_irqsave (&hub->tt.lock, flags); - if (status) dev_err (&hdev->dev, "clear tt %d (%04x) error %d\n", clear->tt, clear->devinfo, status); + + /* Tell the HCD, even if the operation failed */ + drv = clear->hcd->driver; + if (drv->clear_tt_buffer_complete) + (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep); + kfree(clear); + spin_lock_irqsave(&hub->tt.lock, flags); } spin_unlock_irqrestore (&hub->tt.lock, flags); } /** - * usb_hub_tt_clear_buffer - clear control/bulk TT state in high speed hub - * @udev: the device whose split transaction failed - * @pipe: identifies the endpoint of the failed transaction + * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub + * @urb: an URB associated with the failed or incomplete split transaction * * High speed HCDs use this to tell the hub driver that some split control or * bulk transaction failed in a way that requires clearing internal state of @@ -495,8 +500,10 @@ static void hub_tt_kevent (struct work_struct *work) * It may not be possible for that hub to handle additional full (or low) * speed transactions until that state is fully cleared out. */ -void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe) +int usb_hub_clear_tt_buffer(struct urb *urb) { + struct usb_device *udev = urb->dev; + int pipe = urb->pipe; struct usb_tt *tt = udev->tt; unsigned long flags; struct usb_tt_clear *clear; @@ -508,7 +515,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe) if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); /* FIXME recover somehow ... RESET_TT? */ - return; + return -ENOMEM; } /* info that CLEAR_TT_BUFFER needs */ @@ -520,14 +527,19 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe) : (USB_ENDPOINT_XFER_BULK << 11); if (usb_pipein (pipe)) clear->devinfo |= 1 << 15; - + + /* info for completion callback */ + clear->hcd = bus_to_hcd(udev->bus); + clear->ep = urb->ep; + /* tell keventd to clear state for this TT */ spin_lock_irqsave (&tt->lock, flags); list_add_tail (&clear->clear_list, &tt->clear_list); - schedule_work (&tt->kevent); + schedule_work(&tt->clear_work); spin_unlock_irqrestore (&tt->lock, flags); + return 0; } -EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer); +EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer); /* If do_delay is false, return the number of milliseconds the caller * needs to delay. 
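For reference, the consumer side of the reworked interface: usb_hub_clear_tt_buffer() now returns 0 when a CLEAR_TT_BUFFER request has been queued (or -ENOMEM when the state could not be saved), and hub_tt_work() calls the HCD's new clear_tt_buffer_complete() hook after each attempt, successful or not. A minimal sketch of how a host controller driver is expected to use that pair follows; it is condensed from the EHCI changes later in this diff, and my_endpoint/parked/my_handle_split_error are illustrative names only, assuming the usual core declarations (struct urb, struct usb_hcd, struct usb_host_endpoint) are in scope.

/* Hypothetical per-endpoint state kept by an HCD; not kernel API. */
struct my_endpoint {
    unsigned parked:1;   /* waiting for CLEAR_TT_BUFFER to finish */
};

/* Called when an async split transaction to a FS/LS device fails. */
static void my_handle_split_error(struct my_endpoint *ep, struct urb *urb)
{
    /* 0 means the hub driver queued CLEAR_TT_BUFFER and will invoke the
     * clear_tt_buffer_complete() hook later; park the endpoint until then.
     */
    if (usb_hub_clear_tt_buffer(urb) == 0)
        ep->parked = 1;
}

/* Installed as .clear_tt_buffer_complete in the HCD's struct hc_driver. */
static void my_clear_tt_buffer_complete(struct usb_hcd *hcd,
                                        struct usb_host_endpoint *host_ep)
{
    struct my_endpoint *ep = host_ep->hcpriv;

    ep->parked = 0;      /* safe to relink/restart the endpoint again */
}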
@@ -818,7 +830,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) - cancel_work_sync(&hub->tt.kevent); + cancel_work_sync(&hub->tt.clear_work); } /* caller has locked the hub device */ @@ -935,7 +947,7 @@ static int hub_configure(struct usb_hub *hub, spin_lock_init (&hub->tt.lock); INIT_LIST_HEAD (&hub->tt.clear_list); - INIT_WORK (&hub->tt.kevent, hub_tt_kevent); + INIT_WORK(&hub->tt.clear_work, hub_tt_work); switch (hdev->descriptor.bDeviceProtocol) { case 0: break; diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 889c0f32a40..de8081f065e 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -188,16 +188,18 @@ struct usb_tt { /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ spinlock_t lock; struct list_head clear_list; /* of usb_tt_clear */ - struct work_struct kevent; + struct work_struct clear_work; }; struct usb_tt_clear { struct list_head clear_list; unsigned tt; u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; }; -extern void usb_hub_tt_clear_buffer(struct usb_device *dev, int pipe); +extern int usb_hub_clear_tt_buffer(struct urb *urb); extern void usb_ep0_reinit(struct usb_device *); #endif /* __LINUX_HUB_H */ diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 2bed83caacb..9720e699f47 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -806,6 +806,48 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid, return rc; } +static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf) +{ + int err; + + if (dev->have_langid) + return 0; + + if (dev->string_langid < 0) + return -EPIPE; + + err = usb_string_sub(dev, 0, 0, tbuf); + + /* If the string was reported but is malformed, default to english + * (0x0409) */ + if (err == -ENODATA || (err > 0 && err < 4)) { + dev->string_langid = 0x0409; + dev->have_langid = 1; + dev_err(&dev->dev, + "string descriptor 0 malformed (err = %d), " + "defaulting to 0x%04x\n", + err, dev->string_langid); + return 0; + } + + /* In case of all other errors, we assume the device is not able to + * deal with strings at all. 
Set string_langid to -1 in order to + * prevent any string to be retrieved from the device */ + if (err < 0) { + dev_err(&dev->dev, "string descriptor 0 read error: %d\n", + err); + dev->string_langid = -1; + return -EPIPE; + } + + /* always use the first langid listed */ + dev->string_langid = tbuf[2] | (tbuf[3] << 8); + dev->have_langid = 1; + dev_dbg(&dev->dev, "default language 0x%04x\n", + dev->string_langid); + return 0; +} + /** * usb_string - returns UTF-8 version of a string descriptor * @dev: the device whose string descriptor is being retrieved @@ -837,24 +879,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) if (!tbuf) return -ENOMEM; - /* get langid for strings if it's not yet known */ - if (!dev->have_langid) { - err = usb_string_sub(dev, 0, 0, tbuf); - if (err < 0) { - dev_err(&dev->dev, - "string descriptor 0 read error: %d\n", - err); - } else if (err < 4) { - dev_err(&dev->dev, "string descriptor 0 too short\n"); - } else { - dev->string_langid = tbuf[2] | (tbuf[3] << 8); - /* always use the first langid listed */ - dev_dbg(&dev->dev, "default language 0x%04x\n", - dev->string_langid); - } - - dev->have_langid = 1; - } + err = usb_get_langid(dev, tbuf); + if (err < 0) + goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 5d1ddf485d1..7f8e83a954a 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -286,6 +286,27 @@ config USB_S3C_HSOTG default USB_GADGET select USB_GADGET_SELECTED +config USB_GADGET_IMX + boolean "Freescale IMX USB Peripheral Controller" + depends on ARCH_MX1 + help + Freescale's IMX series include an integrated full speed + USB 1.1 device controller. The controller in the IMX series + is register-compatible. + + It has Six fixed-function endpoints, as well as endpoint + zero (for control transfers). + + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "imx_udc" and force all + gadget drivers to also be dynamically linked. + +config USB_IMX + tristate + depends on USB_GADGET_IMX + default USB_GADGET + select USB_GADGET_SELECTED + config USB_GADGET_S3C2410 boolean "S3C2410 USB Device Controller" depends on ARCH_S3C2410 @@ -321,27 +342,6 @@ config USB_GADGET_MUSB_HDRC This OTG-capable silicon IP is used in dual designs including the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin -config USB_GADGET_IMX - boolean "Freescale IMX USB Peripheral Controller" - depends on ARCH_MX1 - help - Freescale's IMX series include an integrated full speed - USB 1.1 device controller. The controller in the IMX series - is register-compatible. - - It has Six fixed-function endpoints, as well as endpoint - zero (for control transfers). - - Say "y" to link the driver statically, or "m" to build a - dynamically linked module called "imx_udc" and force all - gadget drivers to also be dynamically linked. - -config USB_IMX - tristate - depends on USB_GADGET_IMX - default USB_GADGET - select USB_GADGET_SELECTED - config USB_GADGET_M66592 boolean "Renesas M66592 USB Peripheral Controller" select USB_GADGET_DUALSPEED @@ -604,6 +604,7 @@ config USB_ZERO_HNPTEST config USB_AUDIO tristate "Audio Gadget (EXPERIMENTAL)" depends on SND + select SND_PCM help Gadget Audio is compatible with USB Audio Class specification 1.0. 
It will include at least one AudioControl interface, zero or more diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index 826f3adde5d..77352ccc245 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c @@ -48,7 +48,6 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c index 94de7e86461..9f80f4e970b 100644 --- a/drivers/usb/gadget/audio.c +++ b/drivers/usb/gadget/audio.c @@ -42,9 +42,9 @@ * Instead: allocate your own, using normal USB-IF procedures. */ -/* Thanks to NetChip Technologies for donating this product ID. */ -#define AUDIO_VENDOR_NUM 0x0525 /* NetChip */ -#define AUDIO_PRODUCT_NUM 0xa4a1 /* Linux-USB Audio Gadget */ +/* Thanks to Linux Foundation for donating this product ID. */ +#define AUDIO_VENDOR_NUM 0x1d6b /* Linux Foundation */ +#define AUDIO_PRODUCT_NUM 0x0101 /* Linux-USB Audio Gadget */ /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index d006dc652e0..bd102f5052b 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -293,15 +293,16 @@ static int __init eth_bind(struct usb_composite_dev *cdev) /* CDC Subset */ eth_config_driver.label = "CDC Subset/SAFE"; - device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM), - device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM), - device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; + device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM); + device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM); + if (!has_rndis()) + device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; } if (has_rndis()) { /* RNDIS plus ECM-or-Subset */ - device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM), - device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM), + device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM); + device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM); device_desc.bNumConfigurations = 2; } diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c index 6829d596135..a3913519fd5 100644 --- a/drivers/usb/gadget/langwell_udc.c +++ b/drivers/usb/gadget/langwell_udc.c @@ -34,7 +34,6 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index 0ce4e281984..ed21e263f83 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c @@ -139,7 +139,7 @@ static int is_vbus_present(void) { struct pxa2xx_udc_mach_info *mach = the_controller->mach; - if (mach->gpio_vbus) { + if (gpio_is_valid(mach->gpio_vbus)) { int value = gpio_get_value(mach->gpio_vbus); if (mach->gpio_vbus_inverted) @@ -158,7 +158,7 @@ static void pullup_off(void) struct pxa2xx_udc_mach_info *mach = the_controller->mach; int off_level = mach->gpio_pullup_inverted; - if (mach->gpio_pullup) + if (gpio_is_valid(mach->gpio_pullup)) gpio_set_value(mach->gpio_pullup, off_level); else if (mach->udc_command) mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); @@ -169,7 +169,7 @@ static void pullup_on(void) struct pxa2xx_udc_mach_info *mach = the_controller->mach; int on_level = !mach->gpio_pullup_inverted; - if (mach->gpio_pullup) + if (gpio_is_valid(mach->gpio_pullup)) 
gpio_set_value(mach->gpio_pullup, on_level); else if (mach->udc_command) mach->udc_command(PXA2XX_UDC_CMD_CONNECT); @@ -1000,7 +1000,7 @@ static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active) udc = container_of(_gadget, struct pxa25x_udc, gadget); /* not all boards support pullup control */ - if (!udc->mach->gpio_pullup && !udc->mach->udc_command) + if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command) return -EOPNOTSUPP; udc->pullup = (is_active != 0); @@ -1802,11 +1802,13 @@ pxa25x_udc_irq(int irq, void *_dev) USIR0 |= tmp; handled = 1; } +#ifndef CONFIG_USB_PXA25X_SMALL if (usir1 & tmp) { handle_ep(&dev->ep[i+8]); USIR1 |= tmp; handled = 1; } +#endif } } @@ -2160,7 +2162,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev) dev->dev = &pdev->dev; dev->mach = pdev->dev.platform_data; - if (dev->mach->gpio_vbus) { + if (gpio_is_valid(dev->mach->gpio_vbus)) { if ((retval = gpio_request(dev->mach->gpio_vbus, "pxa25x_udc GPIO VBUS"))) { dev_dbg(&pdev->dev, @@ -2173,7 +2175,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev) } else vbus_irq = 0; - if (dev->mach->gpio_pullup) { + if (gpio_is_valid(dev->mach->gpio_pullup)) { if ((retval = gpio_request(dev->mach->gpio_pullup, "pca25x_udc GPIO PULLUP"))) { dev_dbg(&pdev->dev, @@ -2256,10 +2258,10 @@ lubbock_fail0: #endif free_irq(irq, dev); err_irq1: - if (dev->mach->gpio_pullup) + if (gpio_is_valid(dev->mach->gpio_pullup)) gpio_free(dev->mach->gpio_pullup); err_gpio_pullup: - if (dev->mach->gpio_vbus) + if (gpio_is_valid(dev->mach->gpio_vbus)) gpio_free(dev->mach->gpio_vbus); err_gpio_vbus: clk_put(dev->clk); @@ -2294,11 +2296,11 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) free_irq(LUBBOCK_USB_IRQ, dev); } #endif - if (dev->mach->gpio_vbus) { + if (gpio_is_valid(dev->mach->gpio_vbus)) { free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev); gpio_free(dev->mach->gpio_vbus); } - if (dev->mach->gpio_pullup) + if (gpio_is_valid(dev->mach->gpio_pullup)) gpio_free(dev->mach->gpio_pullup); clk_put(dev->clk); @@ -2329,7 +2331,7 @@ static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state) struct pxa25x_udc *udc = platform_get_drvdata(dev); unsigned long flags; - if (!udc->mach->gpio_pullup && !udc->mach->udc_command) + if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command) WARNING("USB host won't detect disconnect!\n"); udc->suspended = 1; diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 2b4660e08c4..ca41b0b5afb 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -442,6 +442,8 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, case OID_802_3_MAC_OPTIONS: pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__); + *outbuf = cpu_to_le32(0); + retval = 0; break; /* ieee802.3 statistics OIDs (table 4-4) */ diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 9a2b8920532..a9b452fe622 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c @@ -28,7 +28,6 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 1576a0520ad..1a920c70b5a 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -181,26 +181,27 @@ config USB_OHCI_HCD_PPC_SOC Enables support for the USB controller on the MPC52xx or STB03xxx 
processor chip. If unsure, say Y. -config USB_OHCI_HCD_PPC_OF - bool "OHCI support for PPC USB controller on OF platform bus" - depends on USB_OHCI_HCD && PPC_OF - default y - ---help--- - Enables support for the USB controller PowerPC present on the - OpenFirmware platform bus. - config USB_OHCI_HCD_PPC_OF_BE - bool "Support big endian HC" - depends on USB_OHCI_HCD_PPC_OF - default y + bool "OHCI support for OF platform bus (big endian)" + depends on USB_OHCI_HCD && PPC_OF select USB_OHCI_BIG_ENDIAN_DESC select USB_OHCI_BIG_ENDIAN_MMIO + ---help--- + Enables support for big-endian USB controllers present on the + OpenFirmware platform bus. config USB_OHCI_HCD_PPC_OF_LE - bool "Support little endian HC" - depends on USB_OHCI_HCD_PPC_OF - default n + bool "OHCI support for OF platform bus (little endian)" + depends on USB_OHCI_HCD && PPC_OF select USB_OHCI_LITTLE_ENDIAN + ---help--- + Enables support for little-endian USB controllers present on the + OpenFirmware platform bus. + +config USB_OHCI_HCD_PPC_OF + bool + depends on USB_OHCI_HCD && PPC_OF + default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE config USB_OHCI_HCD_PCI bool "OHCI support for PCI-bus USB controllers" @@ -337,10 +338,10 @@ config USB_R8A66597_HCD config SUPERH_ON_CHIP_R8A66597 boolean "Enable SuperH on-chip R8A66597 USB" - depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723) + depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724) help This driver enables support for the on-chip R8A66597 in the - SH7366 and SH7723 processors. + SH7366, SH7723 and SH7724 processors. config USB_WHCI_HCD tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index c3a778bd359..59d208d94d4 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -113,6 +113,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = { .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index bf86809c512..991174937db 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -325,6 +325,8 @@ static const struct hc_driver ehci_fsl_hc_driver = { .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int ehci_fsl_drv_probe(struct platform_device *pdev) diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 2b72473544d..11c627ce602 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* already started */ break; case QH_STATE_IDLE: - WARN_ON(1); + /* QH might be waiting for a Clear-TT-Buffer */ + qh_completions(ehci, qh); break; } break; @@ -1003,6 +1004,8 @@ idle_timeout: schedule_timeout_uninterruptible(1); goto rescan; case QH_STATE_IDLE: /* fully unlinked */ + if (qh->clearing_tt) + goto idle_timeout; if (list_empty (&qh->qtd_list)) { qh_put (qh); break; @@ -1030,12 +1033,14 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct ehci_qh *qh; int 
eptype = usb_endpoint_type(&ep->desc); + int epnum = usb_endpoint_num(&ep->desc); + int is_out = usb_endpoint_dir_out(&ep->desc); + unsigned long flags; if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) return; - rescan: - spin_lock_irq(&ehci->lock); + spin_lock_irqsave(&ehci->lock, flags); qh = ep->hcpriv; /* For Bulk and Interrupt endpoints we maintain the toggle state @@ -1044,29 +1049,24 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) * the toggle bit in the QH. */ if (qh) { + usb_settoggle(qh->dev, epnum, is_out, 0); if (!list_empty(&qh->qtd_list)) { WARN_ONCE(1, "clear_halt for a busy endpoint\n"); - } else if (qh->qh_state == QH_STATE_IDLE) { - qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); - } else { - /* It's not safe to write into the overlay area - * while the QH is active. Unlink it first and - * wait for the unlink to complete. + } else if (qh->qh_state == QH_STATE_LINKED) { + + /* The toggle value in the QH can't be updated + * while the QH is active. Unlink it now; + * re-linking will call qh_refresh(). */ - if (qh->qh_state == QH_STATE_LINKED) { - if (eptype == USB_ENDPOINT_XFER_BULK) { - unlink_async(ehci, qh); - } else { - intr_deschedule(ehci, qh); - (void) qh_schedule(ehci, qh); - } + if (eptype == USB_ENDPOINT_XFER_BULK) { + unlink_async(ehci, qh); + } else { + intr_deschedule(ehci, qh); + (void) qh_schedule(ehci, qh); } - spin_unlock_irq(&ehci->lock); - schedule_timeout_uninterruptible(1); - goto rescan; } } - spin_unlock_irq(&ehci->lock); + spin_unlock_irqrestore(&ehci->lock, flags); } static int ehci_get_frame (struct usb_hcd *hcd) diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c index a44bb4a9495..89b7c70c6ed 100644 --- a/drivers/usb/host/ehci-ixp4xx.c +++ b/drivers/usb/host/ehci-ixp4xx.c @@ -61,6 +61,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = { #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int ixp4xx_ehci_probe(struct platform_device *pdev) diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 770dd9aba62..1d283e1b2b8 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; + ehci_reset(ehci); retval = ehci_halt(ehci); if (retval) return retval; @@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd) hcd->has_tt = 1; - ehci_reset(ehci); ehci_port_power(ehci, 0); return retval; @@ -165,6 +165,8 @@ static const struct hc_driver ehci_orion_hc_driver = { .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static void __init diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index f3683e1da16..c2f1b7df918 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -404,6 +404,8 @@ static const struct hc_driver ehci_pci_hc_driver = { .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index fbd272288fc..36f96da129f 100644 --- 
a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -79,6 +79,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = { #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c index 93f7035d00a..1dee33b9139 100644 --- a/drivers/usb/host/ehci-ps3.c +++ b/drivers/usb/host/ehci-ps3.c @@ -75,6 +75,8 @@ static const struct hc_driver ps3_ehci_hc_driver = { #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3192f683f80..7673554fa64 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -93,6 +93,22 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); qh->hw_alt_next = EHCI_LIST_END(ehci); + /* Except for control endpoints, we make hardware maintain data + * toggle (like OHCI) ... here (re)initialize the toggle in the QH, + * and set the pseudo-toggle in udev. Only usb_clear_halt() will + * ever clear it. + */ + if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { + unsigned is_out, epnum; + + is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); + epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; + if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { + qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); + usb_settoggle (qh->dev, epnum, is_out, 1); + } + } + /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ wmb (); qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); @@ -123,6 +139,55 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) /*-------------------------------------------------------------------------*/ +static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + +static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct ehci_qh *qh = ep->hcpriv; + unsigned long flags; + + spin_lock_irqsave(&ehci->lock, flags); + qh->clearing_tt = 0; + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) + && HC_IS_RUNNING(hcd->state)) + qh_link_async(ehci, qh); + spin_unlock_irqrestore(&ehci->lock, flags); +} + +static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, + struct urb *urb, u32 token) +{ + + /* If an async split transaction gets an error or is unlinked, + * the TT buffer may be left in an indeterminate state. We + * have to clear the TT buffer. + * + * Note: this routine is never called for Isochronous transfers. + */ + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { +#ifdef DEBUG + struct usb_device *tt = urb->dev->tt->hub; + dev_dbg(&tt->dev, + "clear tt buffer port %d, a%d ep%d t%08x\n", + urb->dev->ttport, urb->dev->devnum, + usb_pipeendpoint(urb->pipe), token); +#endif /* DEBUG */ + if (!ehci_is_TDI(ehci) + || urb->dev->tt->hub != + ehci_to_hcd(ehci)->self.root_hub) { + if (usb_hub_clear_tt_buffer(urb) == 0) + qh->clearing_tt = 1; + } else { + + /* REVISIT ARC-derived cores don't clear the root + * hub TT buffer in this way... 
+ */ + } + } +} + static int qtd_copy_status ( struct ehci_hcd *ehci, struct urb *urb, @@ -149,6 +214,14 @@ static int qtd_copy_status ( if (token & QTD_STS_BABBLE) { /* FIXME "must" disable babbling device's port too */ status = -EOVERFLOW; + /* CERR nonzero + halt --> stall */ + } else if (QTD_CERR(token)) { + status = -EPIPE; + + /* In theory, more than one of the following bits can be set + * since they are sticky and the transaction is retried. + * Which to test first is rather arbitrary. + */ } else if (token & QTD_STS_MMF) { /* fs/ls interrupt xfer missed the complete-split */ status = -EPROTO; @@ -157,21 +230,15 @@ static int qtd_copy_status ( ? -ENOSR /* hc couldn't read data */ : -ECOMM; /* hc couldn't write data */ } else if (token & QTD_STS_XACT) { - /* timeout, bad crc, wrong PID, etc; retried */ - if (QTD_CERR (token)) - status = -EPIPE; - else { - ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n", - urb->dev->devpath, - usb_pipeendpoint (urb->pipe), - usb_pipein (urb->pipe) ? "in" : "out"); - status = -EPROTO; - } - /* CERR nonzero + no errors + halt --> stall */ - } else if (QTD_CERR (token)) - status = -EPIPE; - else /* unknown */ + /* timeout, bad CRC, wrong PID, etc */ + ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n", + urb->dev->devpath, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); status = -EPROTO; + } else { /* unknown */ + status = -EPROTO; + } ehci_vdbg (ehci, "dev%d ep%d%s qtd token %08x --> status %d\n", @@ -179,28 +246,6 @@ static int qtd_copy_status ( usb_pipeendpoint (urb->pipe), usb_pipein (urb->pipe) ? "in" : "out", token, status); - - /* if async CSPLIT failed, try cleaning out the TT buffer */ - if (status != -EPIPE - && urb->dev->tt - && !usb_pipeint(urb->pipe) - && ((token & QTD_STS_MMF) != 0 - || QTD_CERR(token) == 0) - && (!ehci_is_TDI(ehci) - || urb->dev->tt->hub != - ehci_to_hcd(ehci)->self.root_hub)) { -#ifdef DEBUG - struct usb_device *tt = urb->dev->tt->hub; - dev_dbg (&tt->dev, - "clear tt buffer port %d, a%d ep%d t%08x\n", - urb->dev->ttport, urb->dev->devnum, - usb_pipeendpoint (urb->pipe), token); -#endif /* DEBUG */ - /* REVISIT ARC-derived cores don't clear the root - * hub TT buffer in this way... - */ - usb_hub_tt_clear_buffer (urb->dev, urb->pipe); - } } return status; @@ -330,12 +375,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) */ if ((token & QTD_STS_XACT) && QTD_CERR(token) == 0 && - --qh->xacterrs > 0 && + ++qh->xacterrs < QH_XACTERR_MAX && !urb->unlinked) { ehci_dbg(ehci, "detected XactErr len %zu/%zu retry %d\n", - qtd->length - QTD_LENGTH(token), qtd->length, - QH_XACTERR_MAX - qh->xacterrs); + qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); /* reset the token in the qtd and the * qh overlay (which still contains @@ -391,9 +435,16 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) /* qh unlinked; token in overlay may be most current */ if (state == QH_STATE_IDLE && cpu_to_hc32(ehci, qtd->qtd_dma) - == qh->hw_current) + == qh->hw_current) { token = hc32_to_cpu(ehci, qh->hw_token); + /* An unlink may leave an incomplete + * async transaction in the TT buffer. + * We have to clear it. + */ + ehci_clear_tt_buffer(ehci, qh, urb, token); + } + /* force halt for unlinked or blocked qh, so we'll * patch the qh later and so that completions can't * activate it while we "know" it's stopped. 
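The qtd_copy_status() rewrite above reorders the halted-token classification: babble maps to -EOVERFLOW, a halt with the error counter (CERR) still nonzero is treated as a stall (-EPIPE), a missed complete-split becomes -EPROTO, a data buffer error becomes -ENOSR or -ECOMM depending on direction, and an exhausted XactErr (or anything unrecognized) falls through to -EPROTO. The standalone model below mirrors only that decision order; the STS_* masks, CERR() macro and is_in flag are simplified stand-ins for the real QTD_STS_* token bits and usb_pipein(), not kernel code.

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the EHCI qTD token bits referenced above. */
#define STS_BABBLE  0x10    /* device babbled */
#define STS_XACT    0x08    /* transaction error: timeout, bad CRC, wrong PID */
#define STS_MMF     0x04    /* missed the complete-split window */
#define STS_DBE     0x20    /* data buffer under/overrun */
#define CERR(token) (((token) >> 10) & 0x3)   /* remaining error budget */

/* Mirrors the new ordering for a halted qTD (halt bit already seen). */
static int halted_token_to_errno(unsigned int token, int is_in)
{
    if (token & STS_BABBLE)
        return -EOVERFLOW;              /* port should also be disabled */
    if (CERR(token))
        return -EPIPE;                  /* halt with retries left: a stall */
    if (token & STS_MMF)
        return -EPROTO;                 /* fs/ls interrupt missed CSPLIT */
    if (token & STS_DBE)
        return is_in ? -ENOSR : -ECOMM; /* HC couldn't read/write data */
    return -EPROTO;                     /* 3 strikes on XactErr, or unknown */
}

int main(void)
{
    printf("stall:    %d\n", halted_token_to_errno(0x1 << 10, 1)); /* -EPIPE  */
    printf("3strikes: %d\n", halted_token_to_errno(STS_XACT, 0));  /* -EPROTO */
    return 0;
}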
@@ -419,6 +470,13 @@ halt: && (qtd->hw_alt_next & EHCI_LIST_END(ehci))) last_status = -EINPROGRESS; + + /* As part of low/full-speed endpoint-halt processing + * we must clear the TT buffer (11.17.5). + */ + if (unlikely(last_status != -EINPROGRESS && + last_status != -EREMOTEIO)) + ehci_clear_tt_buffer(ehci, qh, urb, token); } /* if we're removing something not at the queue head, @@ -435,7 +493,7 @@ halt: last = qtd; /* reinit the xacterr counter for the next qtd */ - qh->xacterrs = QH_XACTERR_MAX; + qh->xacterrs = 0; } /* last urb's completion might still need calling */ @@ -834,6 +892,7 @@ done: qh->qh_state = QH_STATE_IDLE; qh->hw_info1 = cpu_to_hc32(ehci, info1); qh->hw_info2 = cpu_to_hc32(ehci, info2); + usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); qh_refresh (ehci, qh); return qh; } @@ -847,6 +906,10 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) __hc32 dma = QH_NEXT(ehci, qh->qh_dma); struct ehci_qh *head; + /* Don't link a QH if there's a Clear-TT-Buffer pending */ + if (unlikely(qh->clearing_tt)) + return; + /* (re)start the async schedule? */ head = ehci->async; timer_action_done (ehci, TIMER_ASYNC_OFF); @@ -864,7 +927,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) } } - /* clear halt and maybe recover from silicon quirk */ + /* clear halt and/or toggle; and maybe recover from silicon quirk */ if (qh->qh_state == QH_STATE_IDLE) qh_refresh (ehci, qh); @@ -876,7 +939,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) head->qh_next.qh = qh; head->hw_next = dma; - qh->xacterrs = QH_XACTERR_MAX; + qh_get(qh); + qh->xacterrs = 0; qh->qh_state = QH_STATE_LINKED; /* qtd completions reported later by interrupt */ } @@ -1016,7 +1080,7 @@ submit_async ( * the HC and TT handle it when the TT has a buffer ready. */ if (likely (qh->qh_state == QH_STATE_IDLE)) - qh_link_async (ehci, qh_get (qh)); + qh_link_async(ehci, qh); done: spin_unlock_irqrestore (&ehci->lock, flags); if (unlikely (qh == NULL)) @@ -1051,8 +1115,6 @@ static void end_unlink_async (struct ehci_hcd *ehci) && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) qh_link_async (ehci, qh); else { - qh_put (qh); // refcount from async list - /* it's not free to turn the async schedule on/off; leave it * active but idle for a while once it empties. 
*/ @@ -1060,6 +1122,7 @@ static void end_unlink_async (struct ehci_hcd *ehci) && ehci->async->qh_next.qh == NULL) timer_action (ehci, TIMER_ASYNC_OFF); } + qh_put(qh); /* refcount from async list */ if (next) { ehci->reclaim = NULL; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 9d1babc7ff6..edd61ee9032 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -542,6 +542,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) } } qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; qh_get (qh); /* update per-qh bandwidth for usbfs */ @@ -1619,11 +1620,14 @@ itd_complete ( desc->status = -EPROTO; /* HC need not update length with this error */ - if (!(t & EHCI_ISOC_BABBLE)) - desc->actual_length = EHCI_ITD_LENGTH (t); + if (!(t & EHCI_ISOC_BABBLE)) { + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { desc->status = 0; - desc->actual_length = EHCI_ITD_LENGTH (t); + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; } else { /* URB was too late */ desc->status = -EXDEV; @@ -2014,7 +2018,8 @@ sitd_complete ( desc->status = -EPROTO; } else { desc->status = 0; - desc->actual_length = desc->length - SITD_LENGTH (t); + desc->actual_length = desc->length - SITD_LENGTH(t); + urb->actual_length += desc->actual_length; } stream->depth -= stream->interval << 3; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 90ad3395bb2..2bfff30f470 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -354,7 +354,9 @@ struct ehci_qh { unsigned short period; /* polling interval */ unsigned short start; /* where polling starts */ #define NO_FRAME ((unsigned short)~0) /* pick new start */ + struct usb_device *dev; /* access to TT */ + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ } __attribute__ ((aligned (32))); /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index bb63b68ddb7..62a226b6167 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -576,9 +576,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd) out_be16(&usb->fhci->regs->usb_event, usb->saved_msk); } else if (usb->port_status == FHCI_PORT_DISABLED) { - if (fhci_ioports_check_bus_state(fhci) == 1 && - usb->port_status != FHCI_PORT_LOW && - usb->port_status != FHCI_PORT_FULL) + if (fhci_ioports_check_bus_state(fhci) == 1) fhci_device_connected_interrupt(fhci); } usb_er &= ~USB_E_RESET_MASK; @@ -605,9 +603,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd) } if (usb_er & USB_E_IDLE_MASK) { - if (usb->port_status == FHCI_PORT_DISABLED && - usb->port_status != FHCI_PORT_LOW && - usb->port_status != FHCI_PORT_FULL) { + if (usb->port_status == FHCI_PORT_DISABLED) { usb_er &= ~USB_E_RESET_MASK; fhci_device_connected_interrupt(fhci); } else if (usb->port_status == diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index 3fa3a170279..d4feebfc63b 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -361,7 +361,7 @@ static int __devexit isp1760_plat_remove(struct platform_device *pdev) static struct platform_driver isp1760_plat_driver = { .probe = isp1760_plat_probe, - .remove = isp1760_plat_remove, + .remove = __devexit_p(isp1760_plat_remove), .driver = { .name = "isp1760", }, diff --git a/drivers/usb/host/ohci-omap.c 
b/drivers/usb/host/ohci-omap.c index f3aaba35e91..83cbecd2a1e 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) static void ohci_omap_stop(struct usb_hcd *hcd) { dev_dbg(hcd->self.controller, "stopping USB Controller\n"); + ohci_stop(hcd); omap_ohci_clock_power(0); } diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 56976cc0352..e18f74946e6 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 2501c571f85..705e3432415 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int { void *addr; u32 temp; + u64 temp_64; addr = &ir_set->irq_pending; temp = xhci_readl(xhci, addr); @@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", addr, (unsigned int)temp); - addr = &ir_set->erst_base[0]; - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n", - addr, (unsigned int) temp); - - addr = &ir_set->erst_base[1]; - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n", - addr, (unsigned int) temp); + addr = &ir_set->erst_base; + temp_64 = xhci_read_64(xhci, addr); + xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", + addr, temp_64); - addr = &ir_set->erst_dequeue[0]; - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n", - addr, (unsigned int) temp); - - addr = &ir_set->erst_dequeue[1]; - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n", - addr, (unsigned int) temp); + addr = &ir_set->erst_dequeue; + temp_64 = xhci_read_64(xhci, addr); + xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", + addr, temp_64); } void xhci_print_run_regs(struct xhci_hcd *xhci) @@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) xhci_dbg(xhci, "Link TRB:\n"); xhci_print_trb_offsets(xhci, trb); - address = trb->link.segment_ptr[0] + - (((u64) trb->link.segment_ptr[1]) << 32); + address = trb->link.segment_ptr; xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); xhci_dbg(xhci, "Interrupter target = 0x%x\n", @@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) (unsigned int) (trb->link.control & TRB_NO_SNOOP)); break; case TRB_TYPE(TRB_TRANSFER): - address = trb->trans_event.buffer[0] + - (((u64) trb->trans_event.buffer[1]) << 32); + address = trb->trans_event.buffer; /* * FIXME: look at flags to figure out if it's an address or if * the data is directly in the buffer field. 
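The xhci-dbg.c hunks above track a register-layout change: erst_base, erst_dequeue, the command ring pointer and the TRB pointer fields become single 64-bit values read with xhci_read_64() and printed via lower_32_bits()/upper_32_bits(), instead of [0]/[1] pairs of 32-bit reads. The standalone snippet below only illustrates that split/recombine arithmetic; lower32(), upper32() and combine64() are stand-ins for the kernel helpers and for a 64-bit MMIO accessor, not their actual implementations.

#include <stdint.h>
#include <stdio.h>

static uint32_t lower32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper32(uint64_t v) { return (uint32_t)(v >> 32); }

/* One 64-bit register value assembled from two 32-bit halves, low half first. */
static uint64_t combine64(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t erst_base = combine64(0x12345000u, 0x00000001u);

    /* Same formatting idea as the patched xhci_dbg() calls. */
    printf("erst_base = @%08llx (lo %08x, hi %08x)\n",
           (unsigned long long)erst_base,
           lower32(erst_base), upper32(erst_base));
    return 0;
}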
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); break; case TRB_TYPE(TRB_COMPLETION): - address = trb->event_cmd.cmd_trb[0] + - (((u64) trb->event_cmd.cmd_trb[1]) << 32); + address = trb->event_cmd.cmd_trb; xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); @@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) for (i = 0; i < TRBS_PER_SEGMENT; ++i) { trb = &seg->trbs[i]; xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, - (unsigned int) trb->link.segment_ptr[0], - (unsigned int) trb->link.segment_ptr[1], + lower_32_bits(trb->link.segment_ptr), + upper_32_bits(trb->link.segment_ptr), (unsigned int) trb->link.intr_target, (unsigned int) trb->link.control); addr += sizeof(*trb); @@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) entry = &erst->entries[i]; xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", (unsigned int) addr, - (unsigned int) entry->seg_addr[0], - (unsigned int) entry->seg_addr[1], + lower_32_bits(entry->seg_addr), + upper_32_bits(entry->seg_addr), (unsigned int) entry->seg_size, (unsigned int) entry->rsvd); addr += sizeof(*entry); @@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) { - u32 val; + u64 val; - val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); - xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val); - val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); - xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); + val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", + lower_32_bits(val)); + xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", + upper_32_bits(val)); } -void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) +/* Print the last 32 bytes for 64-byte contexts */ +static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) +{ + int i; + for (i = 0; i < 4; ++i) { + xhci_dbg(xhci, "@%p (virt) @%08llx " + "(dma) %#08llx - rsvd64[%d]\n", + &ctx[4 + i], (unsigned long long)dma, + ctx[4 + i], i); + dma += 8; + } +} + +void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { - int i, j; - int last_ep_ctx = 31; /* Fields are 32 bits wide, DMA addresses are in bytes */ int field_size = 32 / 8; + int i; - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", - &ctx->drop_flags, (unsigned long long)dma, - ctx->drop_flags); - dma += field_size; - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", - &ctx->add_flags, (unsigned long long)dma, - ctx->add_flags); - dma += field_size; - for (i = 0; i > 6; ++i) { - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", - &ctx->rsvd[i], (unsigned long long)dma, - ctx->rsvd[i], i); - dma += field_size; - } + struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); + dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx); + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); xhci_dbg(xhci, "Slot Context:\n"); xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", - &ctx->slot.dev_info, - (unsigned long long)dma, ctx->slot.dev_info); + &slot_ctx->dev_info, + (unsigned long long)dma, 
slot_ctx->dev_info); dma += field_size; xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", - &ctx->slot.dev_info2, - (unsigned long long)dma, ctx->slot.dev_info2); + &slot_ctx->dev_info2, + (unsigned long long)dma, slot_ctx->dev_info2); dma += field_size; xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", - &ctx->slot.tt_info, - (unsigned long long)dma, ctx->slot.tt_info); + &slot_ctx->tt_info, + (unsigned long long)dma, slot_ctx->tt_info); dma += field_size; xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", - &ctx->slot.dev_state, - (unsigned long long)dma, ctx->slot.dev_state); + &slot_ctx->dev_state, + (unsigned long long)dma, slot_ctx->dev_state); dma += field_size; - for (i = 0; i > 4; ++i) { + for (i = 0; i < 4; ++i) { xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", - &ctx->slot.reserved[i], (unsigned long long)dma, - ctx->slot.reserved[i], i); + &slot_ctx->reserved[i], (unsigned long long)dma, + slot_ctx->reserved[i], i); dma += field_size; } + if (csz) + dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); +} + +void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx, + unsigned int last_ep) +{ + int i, j; + int last_ep_ctx = 31; + /* Fields are 32 bits wide, DMA addresses are in bytes */ + int field_size = 32 / 8; + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); + if (last_ep < 31) last_ep_ctx = last_ep + 1; for (i = 0; i < last_ep_ctx; ++i) { + struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); + dma_addr_t dma = ctx->dma + + ((unsigned long)ep_ctx - (unsigned long)ctx); + xhci_dbg(xhci, "Endpoint %02d Context:\n", i); xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", - &ctx->ep[i].ep_info, - (unsigned long long)dma, ctx->ep[i].ep_info); + &ep_ctx->ep_info, + (unsigned long long)dma, ep_ctx->ep_info); dma += field_size; xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", - &ctx->ep[i].ep_info2, - (unsigned long long)dma, ctx->ep[i].ep_info2); - dma += field_size; - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n", - &ctx->ep[i].deq[0], - (unsigned long long)dma, ctx->ep[i].deq[0]); - dma += field_size; - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n", - &ctx->ep[i].deq[1], - (unsigned long long)dma, ctx->ep[i].deq[1]); + &ep_ctx->ep_info2, + (unsigned long long)dma, ep_ctx->ep_info2); dma += field_size; + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n", + &ep_ctx->deq, + (unsigned long long)dma, ep_ctx->deq); + dma += 2*field_size; xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", - &ctx->ep[i].tx_info, - (unsigned long long)dma, ctx->ep[i].tx_info); + &ep_ctx->tx_info, + (unsigned long long)dma, ep_ctx->tx_info); dma += field_size; for (j = 0; j < 3; ++j) { xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", - &ctx->ep[i].reserved[j], + &ep_ctx->reserved[j], (unsigned long long)dma, - ctx->ep[i].reserved[j], j); + ep_ctx->reserved[j], j); + dma += field_size; + } + + if (csz) + dbg_rsvd64(xhci, (u64 *)ep_ctx, dma); + } +} + +void xhci_dbg_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx, + unsigned int last_ep) +{ + int i; + /* Fields are 32 bits wide, DMA addresses are in bytes */ + int field_size = 32 / 8; + struct xhci_slot_ctx *slot_ctx; + dma_addr_t dma = ctx->dma; + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); + + if (ctx->type == XHCI_CTX_TYPE_INPUT) { + struct xhci_input_control_ctx *ctrl_ctx = + xhci_get_input_control_ctx(xhci, ctx); + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", + 
&ctrl_ctx->drop_flags, (unsigned long long)dma, + ctrl_ctx->drop_flags); + dma += field_size; + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", + &ctrl_ctx->add_flags, (unsigned long long)dma, + ctrl_ctx->add_flags); + dma += field_size; + for (i = 0; i < 6; ++i) { + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n", + &ctrl_ctx->rsvd2[i], (unsigned long long)dma, + ctrl_ctx->rsvd2[i], i); dma += field_size; } + + if (csz) + dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma); } + + slot_ctx = xhci_get_slot_ctx(xhci, ctx); + xhci_dbg_slot_ctx(xhci, ctx); + xhci_dbg_ep_ctx(xhci, ctx, last_ep); } diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index dba3e07ccd0..816c39caca1 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c @@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci) u32 state; state = xhci_readl(xhci, &xhci->op_regs->status); - BUG_ON((state & STS_HALT) == 0); + if ((state & STS_HALT) == 0) { + xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); + return 0; + } xhci_dbg(xhci, "// Reset the HC\n"); command = xhci_readl(xhci, &xhci->op_regs->command); @@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd) static void xhci_work(struct xhci_hcd *xhci) { u32 temp; + u64 temp_64; /* * Clear the op reg interrupt status first, @@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci) /* FIXME this should be a delayed service routine that clears the EHB */ xhci_handle_event(xhci); - /* Clear the event handler busy flag; the event ring should be empty. */ - temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); - xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]); + /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); /* Flush posted writes -- FIXME is this necessary? 
*/ xhci_readl(xhci, &xhci->ir_set->irq_pending); } @@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); u32 temp, temp2; + union xhci_trb *trb; spin_lock(&xhci->lock); + trb = xhci->event_ring->dequeue; /* Check if the xHC generated the interrupt, or the irq is shared */ temp = xhci_readl(xhci, &xhci->op_regs->status); temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); + if (temp == 0xffffffff && temp2 == 0xffffffff) + goto hw_died; + if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { spin_unlock(&xhci->lock); return IRQ_NONE; } + xhci_dbg(xhci, "op reg status = %08x\n", temp); + xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); + xhci_dbg(xhci, "Event ring dequeue ptr:\n"); + xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", + (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), + lower_32_bits(trb->link.segment_ptr), + upper_32_bits(trb->link.segment_ptr), + (unsigned int) trb->link.intr_target, + (unsigned int) trb->link.control); if (temp & STS_FATAL) { xhci_warn(xhci, "WARNING: Host System Error\n"); xhci_halt(xhci); +hw_died: xhci_to_hcd(xhci)->state = HC_STATE_HALT; spin_unlock(&xhci->lock); return -ESHUTDOWN; @@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg) { unsigned long flags; int temp; + u64 temp_64; struct xhci_hcd *xhci = (struct xhci_hcd *) arg; int i, j; @@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg) xhci_dbg(xhci, "Event ring:\n"); xhci_debug_segment(xhci, xhci->event_ring->deq_seg); xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); - temp &= ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); xhci_dbg(xhci, "Command ring:\n"); xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); @@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg) int xhci_run(struct usb_hcd *hcd) { u32 temp; + u64 temp_64; struct xhci_hcd *xhci = hcd_to_xhci(hcd); void (*doorbell)(struct xhci_hcd *) = NULL; @@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd) add_timer(&xhci->event_ring_timer); #endif + xhci_dbg(xhci, "Command ring memory map follows:\n"); + xhci_debug_ring(xhci, xhci->cmd_ring); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + xhci_dbg_cmd_ptrs(xhci); + + xhci_dbg(xhci, "ERST memory map follows:\n"); + xhci_dbg_erst(xhci, &xhci->erst); + xhci_dbg(xhci, "Event ring:\n"); + xhci_debug_ring(xhci, xhci->event_ring); + xhci_dbg_ring_ptrs(xhci, xhci->event_ring); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); + xhci_dbg(xhci, "// Set the interrupt modulation register\n"); temp = xhci_readl(xhci, &xhci->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; @@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd) if (NUM_TEST_NOOPS > 0) doorbell = xhci_setup_one_noop(xhci); - xhci_dbg(xhci, "Command ring memory map follows:\n"); - xhci_debug_ring(xhci, xhci->cmd_ring); - xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); - xhci_dbg_cmd_ptrs(xhci); - - xhci_dbg(xhci, "ERST memory map follows:\n"); - xhci_dbg_erst(xhci, &xhci->erst); - xhci_dbg(xhci, "Event ring:\n"); - xhci_debug_ring(xhci, xhci->event_ring); - xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); - 
temp &= ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); - temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]); - xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp); - temp = xhci_readl(xhci, &xhci->op_regs->command); temp |= (CMD_RUN); xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", @@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) goto exit; } if (usb_endpoint_xfer_control(&urb->ep->desc)) - ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb, + /* We have a spinlock and interrupts disabled, so we must pass + * atomic context to this function, which may allocate memory. + */ + ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) - ret = xhci_queue_bulk_tx(xhci, mem_flags, urb, + ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); else ret = -EINVAL; @@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto done; xhci_dbg(xhci, "Cancel URB %p\n", urb); + xhci_dbg(xhci, "Event ring:\n"); + xhci_debug_ring(xhci, xhci->event_ring); ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; + xhci_dbg(xhci, "Endpoint ring:\n"); + xhci_debug_ring(xhci, ep_ring); td = (struct xhci_td *) urb->hcpriv; ep_ring->cancels_pending++; @@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct xhci_device_control *in_ctx; + struct xhci_container_ctx *in_ctx, *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; unsigned int last_ctx; unsigned int ep_index; struct xhci_ep_ctx *ep_ctx; @@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, } in_ctx = xhci->devs[udev->slot_id]->in_ctx; + out_ctx = xhci->devs[udev->slot_id]->out_ctx; + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); /* If the HC already knows the endpoint is disabled, * or the HCD has noted it is disabled, ignore this request */ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || - in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { + ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", __func__, ep); return 0; } - in_ctx->drop_flags |= drop_flag; - new_drop_flags = in_ctx->drop_flags; + ctrl_ctx->drop_flags |= drop_flag; + new_drop_flags = ctrl_ctx->drop_flags; - in_ctx->add_flags = ~drop_flag; - new_add_flags = in_ctx->add_flags; + ctrl_ctx->add_flags = ~drop_flag; + new_add_flags = ctrl_ctx->add_flags; - last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); + last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); /* Update the last valid endpoint context, if we deleted the last one */ - if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { - in_ctx->slot.dev_info &= ~LAST_CTX_MASK; - in_ctx->slot.dev_info |= LAST_CTX(last_ctx); + if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { + slot_ctx->dev_info &= ~LAST_CTX_MASK; + slot_ctx->dev_info |= LAST_CTX(last_ctx); } - new_slot_info = in_ctx->slot.dev_info; + new_slot_info = slot_ctx->dev_info; xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); @@ -778,17 +809,22 @@ int 
xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct xhci_device_control *in_ctx; + struct xhci_container_ctx *in_ctx, *out_ctx; unsigned int ep_index; struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; u32 added_ctxs; unsigned int last_ctx; u32 new_add_flags, new_drop_flags, new_slot_info; int ret = 0; ret = xhci_check_args(hcd, udev, ep, 1, __func__); - if (ret <= 0) + if (ret <= 0) { + /* So we won't queue a reset ep command for a root hub */ + ep->hcpriv = NULL; return ret; + } xhci = hcd_to_xhci(hcd); added_ctxs = xhci_get_endpoint_flag(&ep->desc); @@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, } in_ctx = xhci->devs[udev->slot_id]->in_ctx; + out_ctx = xhci->devs[udev->slot_id]->out_ctx; + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); /* If the HCD has already noted the endpoint is enabled, * ignore this request. */ - if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { + if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", __func__, ep); return 0; @@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, return -ENOMEM; } - in_ctx->add_flags |= added_ctxs; - new_add_flags = in_ctx->add_flags; + ctrl_ctx->add_flags |= added_ctxs; + new_add_flags = ctrl_ctx->add_flags; /* If xhci_endpoint_disable() was called for this endpoint, but the * xHC hasn't been notified yet through the check_bandwidth() call, @@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * descriptors. We must drop and re-add this endpoint, so we leave the * drop flags alone. */ - new_drop_flags = in_ctx->drop_flags; + new_drop_flags = ctrl_ctx->drop_flags; + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); /* Update the last valid endpoint context, if we just added one past */ - if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { - in_ctx->slot.dev_info &= ~LAST_CTX_MASK; - in_ctx->slot.dev_info |= LAST_CTX(last_ctx); + if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { + slot_ctx->dev_info &= ~LAST_CTX_MASK; + slot_ctx->dev_info |= LAST_CTX(last_ctx); } - new_slot_info = in_ctx->slot.dev_info; + new_slot_info = slot_ctx->dev_info; + + /* Store the usb_device pointer for later use */ + ep->hcpriv = udev; xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", (unsigned int) ep->desc.bEndpointAddress, @@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, return 0; } -static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) +static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) { + struct xhci_input_control_ctx *ctrl_ctx; struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; int i; /* When a device's add flag and drop flag are zero, any subsequent @@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) * untouched. Make sure we don't leave any old state in the input * endpoint contexts. 
*/ - virt_dev->in_ctx->drop_flags = 0; - virt_dev->in_ctx->add_flags = 0; - virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + slot_ctx->dev_info &= ~LAST_CTX_MASK; /* Endpoint 0 is always valid */ - virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); + slot_ctx->dev_info |= LAST_CTX(1); for (i = 1; i < 31; ++i) { - ep_ctx = &virt_dev->in_ctx->ep[i]; + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; - ep_ctx->deq[0] = 0; - ep_ctx->deq[1] = 0; + ep_ctx->deq = 0; ep_ctx->tx_info = 0; } } @@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) unsigned long flags; struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; ret = xhci_check_args(hcd, udev, NULL, 0, __func__); if (ret <= 0) @@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) virt_dev = xhci->devs[udev->slot_id]; /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ - virt_dev->in_ctx->add_flags |= SLOT_FLAG; - virt_dev->in_ctx->add_flags &= ~EP0_FLAG; - virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; - virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->add_flags |= SLOT_FLAG; + ctrl_ctx->add_flags &= ~EP0_FLAG; + ctrl_ctx->drop_flags &= ~SLOT_FLAG; + ctrl_ctx->drop_flags &= ~EP0_FLAG; xhci_dbg(xhci, "New Input Control Context:\n"); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, - LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, + ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, udev->slot_id); if (ret < 0) { spin_unlock_irqrestore(&xhci->lock, flags); @@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, - LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); - xhci_zero_in_ctx(virt_dev); + xhci_zero_in_ctx(xhci, virt_dev); /* Free any old rings */ for (i = 1; i < 31; ++i) { if (virt_dev->new_ep_rings[i]) { @@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) virt_dev->new_ep_rings[i] = NULL; } } - xhci_zero_in_ctx(virt_dev); + xhci_zero_in_ctx(xhci, virt_dev); +} + +/* Deal with stalled endpoints. The core should have sent the control message + * to clear the halt condition. However, we need to make the xHCI hardware + * reset its sequence number, since a device will expect a sequence number of + * zero after the halt condition is cleared. 
+ * Context: in_interrupt + */ +void xhci_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct usb_device *udev; + unsigned int ep_index; + unsigned long flags; + int ret; + struct xhci_dequeue_state deq_state; + struct xhci_ring *ep_ring; + + xhci = hcd_to_xhci(hcd); + udev = (struct usb_device *) ep->hcpriv; + /* Called with a root hub endpoint (or an endpoint that wasn't added + * with xhci_add_endpoint() + */ + if (!ep->hcpriv) + return; + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; + if (!ep_ring->stopped_td) { + xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", + ep->desc.bEndpointAddress); + return; + } + + xhci_dbg(xhci, "Queueing reset endpoint command\n"); + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); + /* + * Can't change the ring dequeue pointer until it's transitioned to the + * stopped state, which is only upon a successful reset endpoint + * command. Better hope that last command worked! + */ + if (!ret) { + xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); + /* We need to move the HW's dequeue pointer past this TD, + * or it will attempt to resend it on the next doorbell ring. + */ + xhci_find_new_dequeue_state(xhci, udev->slot_id, + ep_index, ep_ring->stopped_td, &deq_state); + xhci_dbg(xhci, "Queueing new dequeue state\n"); + xhci_queue_new_dequeue_state(xhci, ep_ring, + udev->slot_id, + ep_index, &deq_state); + kfree(ep_ring->stopped_td); + xhci_ring_cmd_db(xhci); + } + spin_unlock_irqrestore(&xhci->lock, flags); + + if (ret) + xhci_warn(xhci, "FIXME allocate a new ring segment\n"); } /* @@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_virt_device *virt_dev; int ret = 0; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - u32 temp; + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + u64 temp_64; if (!udev->slot_id) { xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); @@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) if (!udev->config) xhci_setup_addressable_virt_dev(xhci, udev); /* Otherwise, assume the core has the device configured how it wants */ + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, - udev->slot_id); + ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, + udev->slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); @@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", virt_dev->cmd_status); + xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); ret = -EINVAL; break; } if (ret) { return ret; } - temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); - xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); - temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); - xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); - xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n", - udev->slot_id, - &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], - xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); - xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = 
%#08x\n", + temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); + xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", udev->slot_id, - &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], - xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); + &xhci->dcbaa->dev_context_ptrs[udev->slot_id], + (unsigned long long) + xhci->dcbaa->dev_context_ptrs[udev->slot_id]); xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", - (unsigned long long)virt_dev->out_ctx_dma); + (unsigned long long)virt_dev->out_ctx->dma); xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); /* * USB core uses address 1 for the roothubs, so we add one to the * address given back to us by the HC. */ - udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; /* Zero the input context control for later use */ - virt_dev->in_ctx->add_flags = 0; - virt_dev->in_ctx->drop_flags = 0; - /* Mirror flags in the output context for future ep enable/disable */ - virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG; - virt_dev->out_ctx->drop_flags = 0; + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->add_flags = 0; + ctrl_ctx->drop_flags = 0; xhci_dbg(xhci, "Device address = %d\n", udev->devnum); /* XXX Meh, not sure if anyone else but choose_address uses this. */ @@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void) /* xhci_device_control has eight fields, and also * embeds one xhci_slot_ctx and 31 xhci_ep_ctx */ - BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8); BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c8a72de1c50..e6b9a1c6002 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, return; prev->next = next; if (link_trbs) { - prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; + prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; @@ -189,6 +189,63 @@ fail: return 0; } +#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) + +struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, + int type, gfp_t flags) +{ + struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); + if (!ctx) + return NULL; + + BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); + ctx->type = type; + ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 
2048 : 1024; + if (type == XHCI_CTX_TYPE_INPUT) + ctx->size += CTX_SIZE(xhci->hcc_params); + + ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); + memset(ctx->bytes, 0, ctx->size); + return ctx; +} + +void xhci_free_container_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); + kfree(ctx); +} + +struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); + return (struct xhci_input_control_ctx *)ctx->bytes; +} + +struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + if (ctx->type == XHCI_CTX_TYPE_DEVICE) + return (struct xhci_slot_ctx *)ctx->bytes; + + return (struct xhci_slot_ctx *) + (ctx->bytes + CTX_SIZE(xhci->hcc_params)); +} + +struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx, + unsigned int ep_index) +{ + /* increment ep index by offset of start of ep ctx array */ + ep_index++; + if (ctx->type == XHCI_CTX_TYPE_INPUT) + ep_index++; + + return (struct xhci_ep_ctx *) + (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); +} + /* All the xhci_tds in the ring's TD list should be freed at this point */ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) { @@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) return; dev = xhci->devs[slot_id]; - xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; - xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; @@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) xhci_ring_free(xhci, dev->ep_rings[i]); if (dev->in_ctx) - dma_pool_free(xhci->device_pool, - dev->in_ctx, dev->in_ctx_dma); + xhci_free_container_ctx(xhci, dev->in_ctx); if (dev->out_ctx) - dma_pool_free(xhci->device_pool, - dev->out_ctx, dev->out_ctx_dma); + xhci_free_container_ctx(xhci, dev->out_ctx); + kfree(xhci->devs[slot_id]); xhci->devs[slot_id] = 0; } @@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags) { - dma_addr_t dma; struct xhci_virt_device *dev; /* Slot ID 0 is reserved */ @@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, return 0; dev = xhci->devs[slot_id]; - /* Allocate the (output) device context that will be used in the HC */ - dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); + /* Allocate the (output) device context that will be used in the HC. 
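
The container-context accessors added above replace the fixed struct xhci_device_control layout with offset arithmetic: contexts are 32 or 64 bytes depending on HCC_64BYTE_CONTEXT, a device context starts with the slot context, and an input context prepends one input-control context, shifting every following context by one more slot. A standalone sketch of that arithmetic follows; the function names and the main() driver are illustrative stand-ins, not part of the patch.

#include <stdio.h>

#define CTX_SIZE(hcc_64byte)	((hcc_64byte) ? 64 : 32)

enum ctx_type { CTX_DEVICE, CTX_INPUT };

static unsigned int slot_ctx_offset(enum ctx_type type, int hcc_64byte)
{
	return (type == CTX_INPUT) ? CTX_SIZE(hcc_64byte) : 0;
}

static unsigned int ep_ctx_offset(enum ctx_type type, int hcc_64byte,
				  unsigned int ep_index)
{
	unsigned int idx = ep_index + 1;	/* skip the slot context */

	if (type == CTX_INPUT)
		idx++;				/* skip the input control context */
	return idx * CTX_SIZE(hcc_64byte);
}

int main(void)
{
	/* ep 0 of an input context with 32-byte contexts: (0 + 2) * 32 = 64 */
	printf("slot ctx @ %u, ep0 ctx @ %u\n",
	       slot_ctx_offset(CTX_INPUT, 0),
	       ep_ctx_offset(CTX_INPUT, 0, 0));
	return 0;
}
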
*/ + dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); if (!dev->out_ctx) goto fail; - dev->out_ctx_dma = dma; + xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, - (unsigned long long)dma); - memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); + (unsigned long long)dev->out_ctx->dma); /* Allocate the (input) device context for address device command */ - dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); + dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); if (!dev->in_ctx) goto fail; - dev->in_ctx_dma = dma; + xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, - (unsigned long long)dma); - memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); + (unsigned long long)dev->in_ctx->dma); /* Allocate endpoint 0 ring */ dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); @@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, init_completion(&dev->cmd_completion); - /* - * Point to output device context in dcbaa; skip the output control - * context, which is eight 32 bit fields (or 32 bytes long) - */ - xhci->dcbaa->dev_context_ptrs[2*slot_id] = - (u32) dev->out_ctx_dma + (32); + /* Point to output device context in dcbaa. */ + xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, - &xhci->dcbaa->dev_context_ptrs[2*slot_id], - (unsigned long long)dev->out_ctx_dma); - xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; + &xhci->dcbaa->dev_context_ptrs[slot_id], + (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); return 1; fail: @@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud struct xhci_virt_device *dev; struct xhci_ep_ctx *ep0_ctx; struct usb_device *top_dev; + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; dev = xhci->devs[udev->slot_id]; /* Slot ID 0 is reserved */ @@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud udev->slot_id); return -EINVAL; } - ep0_ctx = &dev->in_ctx->ep[0]; + ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); + ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); + slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); /* 2) New slot context and endpoint 0 context are valid*/ - dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; + ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; /* 3) Only the control endpoint is valid - one endpoint context */ - dev->in_ctx->slot.dev_info |= LAST_CTX(1); + slot_ctx->dev_info |= LAST_CTX(1); switch (udev->speed) { case USB_SPEED_SUPER: - dev->in_ctx->slot.dev_info |= (u32) udev->route; - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; + slot_ctx->dev_info |= (u32) udev->route; + slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; break; case USB_SPEED_HIGH: - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; + slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; break; case USB_SPEED_FULL: - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; + slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; break; case USB_SPEED_LOW: - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; + slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; break; case USB_SPEED_VARIABLE: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); @@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) /* Found device below root hub */; - 
dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); + slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); /* Is this a LS/FS device under a HS hub? */ @@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud */ if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && udev->tt) { - dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; - dev->in_ctx->slot.tt_info |= udev->ttport << 8; + slot_ctx->tt_info = udev->tt->hub->slot_id; + slot_ctx->tt_info |= udev->ttport << 8; } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); @@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->ep_info2 |= MAX_BURST(0); ep0_ctx->ep_info2 |= ERROR_COUNT(3); - ep0_ctx->deq[0] = + ep0_ctx->deq = dev->ep_rings[0]->first_seg->dma; - ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; - ep0_ctx->deq[1] = 0; + ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ @@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, unsigned int max_burst; ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = &virt_dev->in_ctx->ep[ep_index]; + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); /* Set up the endpoint ring */ virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); if (!virt_dev->new_ep_rings[ep_index]) return -ENOMEM; ep_ring = virt_dev->new_ep_rings[ep_index]; - ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; - ep_ctx->deq[1] = 0; + ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); /* FIXME dig Mult and streams info out of ep companion desc */ - /* Allow 3 retries for everything but isoc */ + /* Allow 3 retries for everything but isoc; + * error count = 0 means infinite retries. + */ if (!usb_endpoint_xfer_isoc(&ep->desc)) ep_ctx->ep_info2 = ERROR_COUNT(3); else - ep_ctx->ep_info2 = ERROR_COUNT(0); + ep_ctx->ep_info2 = ERROR_COUNT(1); ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); @@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, max_packet = ep->desc.wMaxPacketSize; ep_ctx->ep_info2 |= MAX_PACKET(max_packet); /* dig out max burst from ep companion desc */ - max_packet = ep->ss_ep_comp->desc.bMaxBurst; + if (!ep->ss_ep_comp) { + xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); + max_packet = 0; + } else { + max_packet = ep->ss_ep_comp->desc.bMaxBurst; + } ep_ctx->ep_info2 |= MAX_BURST(max_packet); break; case USB_SPEED_HIGH: @@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx; ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = &virt_dev->in_ctx->ep[ep_index]; + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; - ep_ctx->deq[0] = 0; - ep_ctx->deq[1] = 0; + ep_ctx->deq = 0; ep_ctx->tx_info = 0; /* Don't free the endpoint ring until the set interface or configuration * request succeeds. */ } +/* Set up the scratchpad buffer array and scratchpad buffers, if needed. 
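
The scratchpad allocation that follows sizes everything from HCSPARAMS2: per the HCS_MAX_SCRATCHPAD() macro added to xhci.h later in this patch, the buffer count lives in bits 31:27, and dev_context_ptrs[0] is pointed at the scratchpad array. A small sketch of the field extraction, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define HCS_MAX_SCRATCHPAD(p)	(((p) >> 27) & 0x1f)	/* bits 31:27 */

int main(void)
{
	uint32_t hcs_params2 = 0x28000000;	/* hypothetical HCSPARAMS2 value */

	/* 0x28000000 >> 27 == 5, so this HC would want five scratchpad pages */
	printf("scratchpad buffers: %u\n",
	       (unsigned int) HCS_MAX_SCRATCHPAD(hcs_params2));
	return 0;
}
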
*/ +static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) +{ + int i; + struct device *dev = xhci_to_hcd(xhci)->self.controller; + int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); + + xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); + + if (!num_sp) + return 0; + + xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); + if (!xhci->scratchpad) + goto fail_sp; + + xhci->scratchpad->sp_array = + pci_alloc_consistent(to_pci_dev(dev), + num_sp * sizeof(u64), + &xhci->scratchpad->sp_dma); + if (!xhci->scratchpad->sp_array) + goto fail_sp2; + + xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); + if (!xhci->scratchpad->sp_buffers) + goto fail_sp3; + + xhci->scratchpad->sp_dma_buffers = + kzalloc(sizeof(dma_addr_t) * num_sp, flags); + + if (!xhci->scratchpad->sp_dma_buffers) + goto fail_sp4; + + xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; + for (i = 0; i < num_sp; i++) { + dma_addr_t dma; + void *buf = pci_alloc_consistent(to_pci_dev(dev), + xhci->page_size, &dma); + if (!buf) + goto fail_sp5; + + xhci->scratchpad->sp_array[i] = dma; + xhci->scratchpad->sp_buffers[i] = buf; + xhci->scratchpad->sp_dma_buffers[i] = dma; + } + + return 0; + + fail_sp5: + for (i = i - 1; i >= 0; i--) { + pci_free_consistent(to_pci_dev(dev), xhci->page_size, + xhci->scratchpad->sp_buffers[i], + xhci->scratchpad->sp_dma_buffers[i]); + } + kfree(xhci->scratchpad->sp_dma_buffers); + + fail_sp4: + kfree(xhci->scratchpad->sp_buffers); + + fail_sp3: + pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), + xhci->scratchpad->sp_array, + xhci->scratchpad->sp_dma); + + fail_sp2: + kfree(xhci->scratchpad); + xhci->scratchpad = NULL; + + fail_sp: + return -ENOMEM; +} + +static void scratchpad_free(struct xhci_hcd *xhci) +{ + int num_sp; + int i; + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + + if (!xhci->scratchpad) + return; + + num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); + + for (i = 0; i < num_sp; i++) { + pci_free_consistent(pdev, xhci->page_size, + xhci->scratchpad->sp_buffers[i], + xhci->scratchpad->sp_dma_buffers[i]); + } + kfree(xhci->scratchpad->sp_dma_buffers); + kfree(xhci->scratchpad->sp_buffers); + pci_free_consistent(pdev, num_sp * sizeof(u64), + xhci->scratchpad->sp_array, + xhci->scratchpad->sp_dma); + kfree(xhci->scratchpad); + xhci->scratchpad = NULL; +} + void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); @@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) /* Free the Event Ring Segment Table and the actual Event Ring */ xhci_writel(xhci, 0, &xhci->ir_set->erst_size); - xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); - xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); - xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); - xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); + xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); + xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); if (xhci->erst.entries) pci_free_consistent(pdev, size, @@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->event_ring = NULL; xhci_dbg(xhci, "Freed event ring\n"); - xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); - xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); + xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; @@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd 
*xhci) xhci->device_pool = NULL; xhci_dbg(xhci, "Freed device context pool\n"); - xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); - xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); + xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); if (xhci->dcbaa) pci_free_consistent(pdev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); @@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->page_size = 0; xhci->page_shift = 0; + scratchpad_free(xhci); } int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) @@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) dma_addr_t dma; struct device *dev = xhci_to_hcd(xhci)->self.controller; unsigned int val, val2; + u64 val_64; struct xhci_segment *seg; u32 page_size; int i; @@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->dcbaa->dma = dma; xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); - xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); - xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); + xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, SEGMENT_SIZE, 64, xhci->page_size); + /* See Table 46 and Note on Figure 55 */ - /* FIXME support 64-byte contexts */ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, - sizeof(struct xhci_device_control), - 64, xhci->page_size); + 2112, 64, xhci->page_size); if (!xhci->segment_pool || !xhci->device_pool) goto fail; @@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ - val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); - val = (val & ~CMD_RING_ADDR_MASK) | - (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | + (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); - xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); - xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); - xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); + xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); val = xhci_readl(xhci, &xhci->cap_regs->db_off); @@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* set ring base address and size for each segment table entry */ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { struct xhci_erst_entry *entry = &xhci->erst.entries[val]; - entry->seg_addr[0] = seg->dma; - entry->seg_addr[1] = 0; + entry->seg_addr = seg->dma; entry->seg_size = TRBS_PER_SEGMENT; entry->rsvd = 0; seg = seg->next; @@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* set the segment table base address */ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", (unsigned long long)xhci->erst.erst_dma_addr); - val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); - val &= ERST_PTR_MASK; - val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); - xhci_writel(xhci, val, 
&xhci->ir_set->erst_base[0]); - xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); + val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); + val_64 &= ERST_PTR_MASK; + val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); + xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); @@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = 0; + if (scratchpad_alloc(xhci, flags)) + goto fail; + return 0; + fail: xhci_warn(xhci, "Couldn't initialize memory\n"); xhci_mem_cleanup(xhci); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1462709e26c..592fe7e623f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = { .free_dev = xhci_free_dev, .add_endpoint = xhci_add_endpoint, .drop_endpoint = xhci_drop_endpoint, + .endpoint_reset = xhci_endpoint_reset, .check_bandwidth = xhci_check_bandwidth, .reset_bandwidth = xhci_reset_bandwidth, .address_device = xhci_address_device, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 02d81985c45..aa88a067148 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci, static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) { union xhci_trb *next = ++(ring->dequeue); + unsigned long long addr; ring->deq_updates++; /* Update the dequeue pointer further if that was a link TRB or we're at @@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer ring->dequeue = ring->deq_seg->trbs; next = ring->dequeue; } + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); + if (ring == xhci->event_ring) + xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); + else if (ring == xhci->cmd_ring) + xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); + else + xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); } /* @@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer { u32 chain; union xhci_trb *next; + unsigned long long addr; chain = ring->enqueue->generic.field[3] & TRB_CHAIN; next = ++(ring->enqueue); @@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer ring->enqueue = ring->enq_seg->trbs; next = ring->enqueue; } + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); + if (ring == xhci->event_ring) + xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); + else if (ring == xhci->cmd_ring) + xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); + else + xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); } /* @@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, void xhci_set_hc_event_deq(struct xhci_hcd *xhci) { - u32 temp; + u64 temp; dma_addr_t deq; deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, @@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_warn(xhci, "WARN something wrong with SW event ring " "dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ - temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); + temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp &= ERST_PTR_MASK; - if (!in_interrupt()) - xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); - xhci_writel(xhci, 0, 
&xhci->ir_set->erst_dequeue[1]); - xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, - &xhci->ir_set->erst_dequeue[0]); + /* Don't clear the EHB bit (which is RW1C) because + * there might be more events to service. + */ + temp &= ~ERST_EHB; + xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n"); + xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, + &xhci->ir_set->erst_dequeue); } /* Ring the host controller doorbell after placing a command on the ring */ @@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, /* Don't ring the doorbell for this endpoint if there are pending * cancellations because the we don't want to interrupt processing. */ - if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { + if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) + && !(ep_ring->state & EP_HALTED)) { field = xhci_readl(xhci, db_addr) & DB_MASK; xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); /* Flush PCI posted writes - FIXME Matthew Wilcox says this @@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg( return cur_seg; } -struct dequeue_state { - struct xhci_segment *new_deq_seg; - union xhci_trb *new_deq_ptr; - int new_cycle_state; -}; - /* * Move the xHC's endpoint ring dequeue pointer past cur_td. * Record the new state of the xHC's endpoint ring dequeue segment, @@ -336,24 +349,30 @@ struct dequeue_state { * - Finally we move the dequeue state one TRB further, toggling the cycle bit * if we've moved it past a link TRB with the toggle cycle bit set. */ -static void find_new_dequeue_state(struct xhci_hcd *xhci, +void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, - struct xhci_td *cur_td, struct dequeue_state *state) + struct xhci_td *cur_td, struct xhci_dequeue_state *state) { struct xhci_virt_device *dev = xhci->devs[slot_id]; struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; struct xhci_generic_trb *trb; + struct xhci_ep_ctx *ep_ctx; + dma_addr_t addr; state->new_cycle_state = 0; + xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); state->new_deq_seg = find_trb_seg(cur_td->start_seg, ep_ring->stopped_trb, &state->new_cycle_state); if (!state->new_deq_seg) BUG(); /* Dig out the cycle state saved by the xHC during the stop ep cmd */ - state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; + xhci_dbg(xhci, "Finding endpoint context\n"); + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + state->new_cycle_state = 0x1 & ep_ctx->deq; state->new_deq_ptr = cur_td->last_trb; + xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); @@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci, next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); /* Don't update the ring cycle state for the producer (us). 
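
xhci_find_new_dequeue_state() above recovers the saved cycle state with 0x1 & ep_ctx->deq. That works because TRB rings are 16-byte aligned, so the low bits of the 64-bit dequeue field carry the Dequeue Cycle State rather than address bits, matching how xhci_endpoint_init() ORs ep_ring->cycle_state into ep_ctx->deq earlier in this patch. A toy illustration with an invented value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical endpoint context 'deq' value: TRB address | DCS bit */
	uint64_t deq = 0x00000001fffff010ULL | 0x1;

	uint64_t trb_addr = deq & ~0xfULL;	/* 16-byte aligned TRB address */
	unsigned int dcs = (unsigned int)(deq & 0x1);	/* dequeue cycle state */

	printf("trb addr = 0x%llx, cycle = %u\n",
	       (unsigned long long) trb_addr, dcs);
	return 0;
}
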
*/ + xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", + state->new_deq_seg); + addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); + xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", + (unsigned long long) addr); + xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); ep_ring->dequeue = state->new_deq_ptr; ep_ring->deq_seg = state->new_deq_seg; } @@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index, struct xhci_segment *deq_seg, union xhci_trb *deq_ptr, u32 cycle_state); +void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, + struct xhci_ring *ep_ring, unsigned int slot_id, + unsigned int ep_index, struct xhci_dequeue_state *deq_state) +{ + xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " + "new deq ptr = %p (0x%llx dma), new cycle = %u\n", + deq_state->new_deq_seg, + (unsigned long long)deq_state->new_deq_seg->dma, + deq_state->new_deq_ptr, + (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), + deq_state->new_cycle_state); + queue_set_tr_deq(xhci, slot_id, ep_index, + deq_state->new_deq_seg, + deq_state->new_deq_ptr, + (u32) deq_state->new_cycle_state); + /* Stop the TD queueing code from ringing the doorbell until + * this command completes. The HC won't set the dequeue pointer + * if the ring is running, and ringing the doorbell starts the + * ring running. + */ + ep_ring->state |= SET_DEQ_PENDING; + xhci_ring_cmd_db(xhci); +} + /* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: @@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, struct xhci_td *cur_td = 0; struct xhci_td *last_unlinked_td; - struct dequeue_state deq_state; + struct xhci_dequeue_state deq_state; #ifdef CONFIG_USB_HCD_STAT ktime_t stop_time = ktime_get(); #endif @@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, * move the xHC endpoint ring dequeue pointer past this TD. */ if (cur_td == ep_ring->stopped_td) - find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, &deq_state); else td_to_noop(xhci, ep_ring, cur_td); @@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { - xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " - "new deq ptr = %p (0x%llx dma), new cycle = %u\n", - deq_state.new_deq_seg, - (unsigned long long)deq_state.new_deq_seg->dma, - deq_state.new_deq_ptr, - (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), - deq_state.new_cycle_state); - queue_set_tr_deq(xhci, slot_id, ep_index, - deq_state.new_deq_seg, - deq_state.new_deq_ptr, - (u32) deq_state.new_cycle_state); - /* Stop the TD queueing code from ringing the doorbell until - * this command completes. The HC won't set the dequeue pointer - * if the ring is running, and ringing the doorbell starts the - * ring running. 
- */ - ep_ring->state |= SET_DEQ_PENDING; - xhci_ring_cmd_db(xhci); + xhci_queue_new_dequeue_state(xhci, ep_ring, + slot_id, ep_index, &deq_state); } else { /* Otherwise just ring the doorbell to restart the ring */ ring_ep_doorbell(xhci, slot_id, ep_index); @@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, unsigned int ep_index; struct xhci_ring *ep_ring; struct xhci_virt_device *dev; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); dev = xhci->devs[slot_id]; ep_ring = dev->ep_rings[ep_index]; + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { unsigned int ep_state; @@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, case COMP_CTX_STATE: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " "to incorrect slot or ep state.\n"); - ep_state = dev->out_ctx->ep[ep_index].ep_info; + ep_state = ep_ctx->ep_info; ep_state &= EP_STATE_MASK; - slot_state = dev->out_ctx->slot.dev_state; + slot_state = slot_ctx->dev_state; slot_state = GET_SLOT_STATE(slot_state); xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", slot_state, ep_state); @@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, * cancelling URBs, which might not be an error... */ } else { - xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " - "deq[1] = 0x%x.\n", - dev->out_ctx->ep[ep_index].deq[0], - dev->out_ctx->ep[ep_index].deq[1]); + xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", + ep_ctx->deq); } ep_ring->state &= ~SET_DEQ_PENDING; ring_ep_doorbell(xhci, slot_id, ep_index); } +static void handle_reset_ep_completion(struct xhci_hcd *xhci, + struct xhci_event_cmd *event, + union xhci_trb *trb) +{ + int slot_id; + unsigned int ep_index; + + slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); + ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); + /* This command will only fail if the endpoint wasn't halted, + * but we don't care. + */ + xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", + (unsigned int) GET_COMP_CODE(event->status)); + + /* Clear our internal halted state and restart the ring */ + xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; + ring_ep_doorbell(xhci, slot_id, ep_index); +} static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) @@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, u64 cmd_dma; dma_addr_t cmd_dequeue_dma; - cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; + cmd_dma = event->cmd_trb; cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); /* Is the command ring deq ptr out of sync with the deq seg ptr? 
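
The handle_cmd_completion() hunk above drops the hand-assembled 64-bit pointer (shift the high dword, add the low one) in favour of a plain u64 cmd_trb field, while later hunks split DMA addresses back into dwords with lower_32_bits()/upper_32_bits() when queueing TRBs. The round trip both directions depend on can be sketched with local stand-in helpers (not the kernel's macros):

#include <assert.h>
#include <stdint.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t) v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma = 0x00000001fedcba80ULL;	/* hypothetical TRB address */
	uint32_t lo = lower_32(dma);
	uint32_t hi = upper_32(dma);

	/* The removed code rebuilt the pointer exactly this way. */
	assert((((uint64_t) hi << 32) + lo) == dma);
	return 0;
}
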
*/ @@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_TYPE(TRB_CMD_NOOP): ++xhci->noops_handled; break; + case TRB_TYPE(TRB_RESET_EP): + handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); + break; default: /* Skip over unknown commands on the event ring */ xhci->error_bitmask |= 1 << 6; @@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, union xhci_trb *event_trb; struct urb *urb = 0; int status = -EINPROGRESS; + struct xhci_ep_ctx *ep_ctx; + xhci_dbg(xhci, "In %s\n", __func__); xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; if (!xdev) { xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); @@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, /* Endpoint ID is 1 based, our index is zero based */ ep_index = TRB_TO_EP_ID(event->flags) - 1; + xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); ep_ring = xdev->ep_rings[ep_index]; - if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { + ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); return -ENODEV; } - event_dma = event->buffer[0]; - if (event->buffer[1] != 0) - xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); - + event_dma = event->buffer; /* This TRB should be in the TD at the head of this ring's TD list */ + xhci_dbg(xhci, "%s - checking for list empty\n", __func__); if (list_empty(&ep_ring->td_list)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(event->flags), ep_index); @@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, urb = NULL; goto cleanup; } + xhci_dbg(xhci, "%s - getting list entry\n", __func__); td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); /* Is this a TRB in the currently executing TD? */ + xhci_dbg(xhci, "%s - looking for TD\n", __func__); event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, event_dma); + xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg); if (!event_seg) { /* HC is busted, give up! 
*/ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); @@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); - xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", - (unsigned int) event->buffer[0]); - xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", - (unsigned int) event->buffer[1]); + xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n", + lower_32_bits(event->buffer)); + xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n", + upper_32_bits(event->buffer)); xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", (unsigned int) event->transfer_len); xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", @@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, break; case COMP_STALL: xhci_warn(xhci, "WARN: Stalled endpoint\n"); + ep_ring->state |= EP_HALTED; status = -EPIPE; break; case COMP_TRB_ERR: @@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "WARN: transfer error on endpoint\n"); status = -EPROTO; break; + case COMP_BABBLE: + xhci_warn(xhci, "WARN: babble error on endpoint\n"); + status = -EOVERFLOW; + break; case COMP_DB_ERR: xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); status = -ENOSR; @@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (event_trb != ep_ring->dequeue) { /* The event was for the status stage */ if (event_trb == td->last_trb) { - td->urb->actual_length = - td->urb->transfer_buffer_length; + if (td->urb->actual_length != 0) { + /* Don't overwrite a previously set error code */ + if (status == -EINPROGRESS || status == 0) + /* Did we already see a short data stage? */ + status = -EREMOTEIO; + } else { + td->urb->actual_length = + td->urb->transfer_buffer_length; + } } else { /* Maybe the event was for the data stage? */ - if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) + if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { /* We didn't stop on a link TRB in the middle */ td->urb->actual_length = td->urb->transfer_buffer_length - TRB_LEN(event->transfer_len); + xhci_dbg(xhci, "Waiting for status stage event\n"); + urb = NULL; + goto cleanup; + } } } } else { @@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_LEN(event->transfer_len)); td->urb->actual_length = 0; } - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - status = -EREMOTEIO; - else - status = 0; + /* Don't overwrite a previously set error code */ + if (status == -EINPROGRESS) { + if (td->urb->transfer_flags & URB_SHORT_NOT_OK) + status = -EREMOTEIO; + else + status = 0; + } } else { td->urb->actual_length = td->urb->transfer_buffer_length; /* Ignore a short packet completion if the * untransferred length was zero. */ - status = 0; + if (status == -EREMOTEIO) + status = 0; } } else { /* Slow path - walk the list, starting from the dequeue @@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_LEN(event->transfer_len); } } - /* The Endpoint Stop Command completion will take care of - * any stopped TDs. A stopped TD may be restarted, so don't update the - * ring dequeue pointer or take this TD off any lists yet. - */ if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || GET_COMP_CODE(event->transfer_len) == COMP_STOP) { + /* The Endpoint Stop Command completion will take care of any + * stopped TDs. 
A stopped TD may be restarted, so don't update + * the ring dequeue pointer or take this TD off any lists yet. + */ ep_ring->stopped_td = td; ep_ring->stopped_trb = event_trb; } else { - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) + if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { + /* The transfer is completed from the driver's + * perspective, but we need to issue a set dequeue + * command for this stalled endpoint to move the dequeue + * pointer past the TD. We can't do that here because + * the halt condition must be cleared first. + */ + ep_ring->stopped_td = td; + ep_ring->stopped_trb = event_trb; + } else { + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + inc_deq(xhci, ep_ring, false); inc_deq(xhci, ep_ring, false); - inc_deq(xhci, ep_ring, false); + } /* Clean up the endpoint's TD list */ urb = td->urb; @@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, list_del(&td->cancelled_td_list); ep_ring->cancels_pending--; } - kfree(td); + /* Leave the TD around for the reset endpoint function to use */ + if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { + kfree(td); + } urb->hcpriv = NULL; } cleanup: @@ -997,6 +1093,8 @@ cleanup: /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */ if (urb) { usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); + xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", + urb, td->urb->actual_length, status); spin_unlock(&xhci->lock); usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); spin_lock(&xhci->lock); @@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci) int update_ptrs = 1; int ret; + xhci_dbg(xhci, "In %s\n", __func__); if (!xhci->event_ring || !xhci->event_ring->dequeue) { xhci->error_bitmask |= 1 << 1; return; @@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci) xhci->error_bitmask |= 1 << 2; return; } + xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); /* FIXME: Handle more event types. 
*/ switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { case TRB_TYPE(TRB_COMPLETION): + xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); handle_cmd_completion(xhci, &event->event_cmd); + xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); break; case TRB_TYPE(TRB_PORT_STATUS): + xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); handle_port_status(xhci, event); + xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); update_ptrs = 0; break; case TRB_TYPE(TRB_TRANSFER): + xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); ret = handle_tx_event(xhci, &event->trans_event); + xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); if (ret < 0) xhci->error_bitmask |= 1 << 9; else @@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, */ xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); return -ENOENT; - case EP_STATE_HALTED: case EP_STATE_ERROR: - xhci_warn(xhci, "WARN waiting for halt or error on ep " - "to be cleared\n"); + xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); /* FIXME event handling code for error needs to clear it */ /* XXX not sure if this should be -ENOENT or not */ return -EINVAL; + case EP_STATE_HALTED: + xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); case EP_STATE_STOPPED: case EP_STATE_RUNNING: break; @@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, gfp_t mem_flags) { int ret; - + struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); ret = prepare_ring(xhci, xdev->ep_rings[ep_index], - xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, + ep_ctx->ep_info & EP_STATE_MASK, num_trbs, mem_flags); if (ret) return ret; @@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Queue the first TRB, even if it's zero-length */ do { u32 field = 0; + u32 length_field = 0; /* Don't change the cycle bit of the first TRB until later */ if (first_trb) @@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); } + length_field = TRB_LEN(trb_buff_len) | + TD_REMAINDER(urb->transfer_buffer_length - running_total) | + TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, - (u32) addr, - (u32) ((u64) addr >> 32), - TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), + lower_32_bits(addr), + upper_32_bits(addr), + length_field, /* We always want to know if the TRB was short, * or we won't get an event when it completes. * (Unless we use event data TRBs, which are a @@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct xhci_generic_trb *start_trb; bool first_trb; int start_cycle; - u32 field; + u32 field, length_field; int running_total, trb_buff_len, ret; u64 addr; @@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td->last_trb = ep_ring->enqueue; field |= TRB_IOC; } + length_field = TRB_LEN(trb_buff_len) | + TD_REMAINDER(urb->transfer_buffer_length - running_total) | + TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, - (u32) addr, - (u32) ((u64) addr >> 32), - TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), + lower_32_bits(addr), + upper_32_bits(addr), + length_field, /* We always want to know if the TRB was short, * or we won't get an event when it completes. 
* (Unless we use event data TRBs, which are a @@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct usb_ctrlrequest *setup; struct xhci_generic_trb *start_trb; int start_cycle; - u32 field; + u32 field, length_field; struct xhci_td *td; ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; @@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* If there's data, queue data TRBs */ field = 0; + length_field = TRB_LEN(urb->transfer_buffer_length) | + TD_REMAINDER(urb->transfer_buffer_length) | + TRB_INTR_TARGET(0); if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; queue_trb(xhci, ep_ring, false, lower_32_bits(urb->transfer_dma), upper_32_bits(urb->transfer_dma), - TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), + length_field, /* Event on short tx */ field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); } @@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) { - return queue_command(xhci, in_ctx_ptr, 0, 0, + return queue_command(xhci, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); } @@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) { - return queue_command(xhci, in_ctx_ptr, 0, 0, + return queue_command(xhci, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); } @@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, u32 type = TRB_TYPE(TRB_SET_DEQ); addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); - if (addr == 0) + if (addr == 0) { xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", deq_seg, deq_ptr); - return queue_command(xhci, (u32) addr | cycle_state, 0, 0, + return 0; + } + return queue_command(xhci, lower_32_bits(addr) | cycle_state, + upper_32_bits(addr), 0, trb_slot_id | trb_ep_index | type); } + +int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, + unsigned int ep_index) +{ + u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); + u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 type = TRB_TYPE(TRB_RESET_EP); + + return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); +} diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8936eeb5588..d31d32206ba 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -25,6 +25,7 @@ #include <linux/usb.h> #include <linux/timer.h> +#include <linux/kernel.h> #include "../core/hcd.h" /* Code sharing between pci-quirks and xhci hcd */ @@ -42,14 +43,6 @@ * xHCI register interface. * This corresponds to the eXtensible Host Controller Interface (xHCI) * Revision 0.95 specification - * - * Registers should always be accessed with double word or quad word accesses. - * - * Some xHCI implementations may support 64-bit address pointers. Registers - * with 64-bit address pointers should be written to with dword accesses by - * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. - * xHCI implementations that do not support 64-bit address pointers will ignore - * the high dword, and write order is irrelevant. 
*/ /** @@ -96,6 +89,7 @@ struct xhci_cap_regs { #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ +#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) /* HCSPARAMS3 - hcs_params3 - bitmasks */ /* bits 0:7, Max U1 to U0 latency for the roothub ports */ @@ -166,10 +160,10 @@ struct xhci_op_regs { u32 reserved1; u32 reserved2; u32 dev_notification; - u32 cmd_ring[2]; + u64 cmd_ring; /* rsvd: offset 0x20-2F */ u32 reserved3[4]; - u32 dcbaa_ptr[2]; + u64 dcbaa_ptr; u32 config_reg; /* rsvd: offset 0x3C-3FF */ u32 reserved4[241]; @@ -254,7 +248,7 @@ struct xhci_op_regs { #define CMD_RING_RUNNING (1 << 3) /* bits 4:5 reserved and should be preserved */ /* Command Ring pointer - bit mask for the lower 32 bits. */ -#define CMD_RING_ADDR_MASK (0xffffffc0) +#define CMD_RING_RSVD_BITS (0x3f) /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ @@ -382,8 +376,8 @@ struct xhci_intr_reg { u32 irq_control; u32 erst_size; u32 rsvd; - u32 erst_base[2]; - u32 erst_dequeue[2]; + u64 erst_base; + u64 erst_dequeue; }; /* irq_pending bitmasks */ @@ -453,6 +447,27 @@ struct xhci_doorbell_array { /** + * struct xhci_container_ctx + * @type: Type of context. Used to calculated offsets to contained contexts. + * @size: Size of the context data + * @bytes: The raw context data given to HW + * @dma: dma address of the bytes + * + * Represents either a Device or Input context. Holds a pointer to the raw + * memory used for the context (bytes) and dma address of it (dma). + */ +struct xhci_container_ctx { + unsigned type; +#define XHCI_CTX_TYPE_DEVICE 0x1 +#define XHCI_CTX_TYPE_INPUT 0x2 + + int size; + + u8 *bytes; + dma_addr_t dma; +}; + +/** * struct xhci_slot_ctx * @dev_info: Route string, device speed, hub info, and last valid endpoint * @dev_info2: Max exit latency for device number, root hub port number @@ -538,7 +553,7 @@ struct xhci_slot_ctx { struct xhci_ep_ctx { u32 ep_info; u32 ep_info2; - u32 deq[2]; + u64 deq; u32 tx_info; /* offset 0x14 - 0x1f reserved for HC internal use */ u32 reserved[3]; @@ -589,18 +604,16 @@ struct xhci_ep_ctx { /** - * struct xhci_device_control - * Input/Output context; see section 6.2.5. + * struct xhci_input_control_context + * Input control context; see section 6.2.5. * * @drop_context: set the bit of the endpoint context you want to disable * @add_context: set the bit of the endpoint context you want to enable */ -struct xhci_device_control { +struct xhci_input_control_ctx { u32 drop_flags; u32 add_flags; - u32 rsvd[6]; - struct xhci_slot_ctx slot; - struct xhci_ep_ctx ep[31]; + u32 rsvd2[6]; }; /* drop context bitmasks */ @@ -608,7 +621,6 @@ struct xhci_device_control { /* add context bitmasks */ #define ADD_EP(x) (0x1 << x) - struct xhci_virt_device { /* * Commands to the hardware are passed an "input context" that @@ -618,11 +630,10 @@ struct xhci_virt_device { * track of input and output contexts separately because * these commands might fail and we don't trust the hardware. 
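HCSPARAMS2 now gets an HCS_MAX_SCRATCHPAD() accessor for the scratchpad-buffer count in bits 27:31, which the scratchpad bookkeeping added further down in this header relies on. A quick stand-alone check of the decode, reusing the same shift and mask as the macro above (the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

/* Same shift/mask as the HCS_MAX_SCRATCHPAD() macro added above. */
#define HCS_MAX_SCRATCHPAD(p)  (((p) >> 27) & 0x1f)

int main(void)
{
	uint32_t hcs_params2 = 0x40000031;  /* made-up HCSPARAMS2 value */

	/* 0x40000031 >> 27 is 8, so the controller asks for 8 buffers. */
	printf("scratchpad buffers requested: %u\n",
	       (unsigned)HCS_MAX_SCRATCHPAD(hcs_params2));
	return 0;
}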
*/ - struct xhci_device_control *out_ctx; - dma_addr_t out_ctx_dma; + struct xhci_container_ctx *out_ctx; /* Used for addressing devices and configuration changes */ - struct xhci_device_control *in_ctx; - dma_addr_t in_ctx_dma; + struct xhci_container_ctx *in_ctx; + /* FIXME when stream support is added */ struct xhci_ring *ep_rings[31]; /* Temporary storage in case the configure endpoint command fails and we @@ -641,7 +652,7 @@ struct xhci_virt_device { */ struct xhci_device_context_array { /* 64-bit device addresses; we only write 32-bit addresses */ - u32 dev_context_ptrs[2*MAX_HC_SLOTS]; + u64 dev_context_ptrs[MAX_HC_SLOTS]; /* private xHCD pointers */ dma_addr_t dma; }; @@ -654,7 +665,7 @@ struct xhci_device_context_array { struct xhci_stream_ctx { /* 64-bit stream ring address, cycle state, and stream type */ - u32 stream_ring[2]; + u64 stream_ring; /* offset 0x14 - 0x1f reserved for HC internal use */ u32 reserved[2]; }; @@ -662,7 +673,7 @@ struct xhci_stream_ctx { struct xhci_transfer_event { /* 64-bit buffer address, or immediate data */ - u32 buffer[2]; + u64 buffer; u32 transfer_len; /* This field is interpreted differently based on the type of TRB */ u32 flags; @@ -744,7 +755,7 @@ struct xhci_transfer_event { struct xhci_link_trb { /* 64-bit segment pointer*/ - u32 segment_ptr[2]; + u64 segment_ptr; u32 intr_target; u32 control; }; @@ -755,7 +766,7 @@ struct xhci_link_trb { /* Command completion event TRB */ struct xhci_event_cmd { /* Pointer to command TRB, or the value passed by the event data trb */ - u32 cmd_trb[2]; + u64 cmd_trb; u32 status; u32 flags; }; @@ -848,8 +859,8 @@ union xhci_trb { #define TRB_CONFIG_EP 12 /* Evaluate Context Command */ #define TRB_EVAL_CONTEXT 13 -/* Reset Transfer Ring Command */ -#define TRB_RESET_RING 14 +/* Reset Endpoint Command */ +#define TRB_RESET_EP 14 /* Stop Transfer Ring Command */ #define TRB_STOP_RING 15 /* Set Transfer Ring Dequeue Pointer Command */ @@ -929,6 +940,7 @@ struct xhci_ring { unsigned int cancels_pending; unsigned int state; #define SET_DEQ_PENDING (1 << 0) +#define EP_HALTED (1 << 1) /* The TRB that was last reported in a stopped endpoint ring */ union xhci_trb *stopped_trb; struct xhci_td *stopped_td; @@ -940,9 +952,15 @@ struct xhci_ring { u32 cycle_state; }; +struct xhci_dequeue_state { + struct xhci_segment *new_deq_seg; + union xhci_trb *new_deq_ptr; + int new_cycle_state; +}; + struct xhci_erst_entry { /* 64-bit event ring segment address */ - u32 seg_addr[2]; + u64 seg_addr; u32 seg_size; /* Set to zero */ u32 rsvd; @@ -957,6 +975,13 @@ struct xhci_erst { unsigned int erst_size; }; +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; + dma_addr_t *sp_dma_buffers; +}; + /* * Each segment table entry is 4*32bits long. 
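The header-wide switch from u32 pairs to u64 fields (device context pointers, stream ring, event buffer, link and command TRB pointers, ERST segment address) leans on the paired access helpers added a little further down: 64-bit registers are still touched as two dwords, low dword first. A stand-alone model of that split access, with a plain array standing in for the memory-mapped register:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the dword-split access behind xhci_read_64()/xhci_write_64():
 * the 64-bit value is written low dword first, then high dword, and read
 * back the same way.  The "register" is just local memory here.
 */
static void write_64(volatile uint32_t *regs, uint64_t val)
{
	regs[0] = (uint32_t)val;           /* low dword first */
	regs[1] = (uint32_t)(val >> 32);   /* then the high dword */
}

static uint64_t read_64(const volatile uint32_t *regs)
{
	uint64_t lo = regs[0];
	uint64_t hi = regs[1];

	return lo | (hi << 32);
}

int main(void)
{
	volatile uint32_t fake_reg[2] = { 0, 0 };

	write_64(fake_reg, 0x1122334455667788ull);
	printf("read back: 0x%016llx\n",
	       (unsigned long long)read_64(fake_reg));
	return 0;
}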
1K seems like an ok size: * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, @@ -1011,6 +1036,9 @@ struct xhci_hcd { struct xhci_ring *cmd_ring; struct xhci_ring *event_ring; struct xhci_erst erst; + /* Scratchpad */ + struct xhci_scratchpad *scratchpad; + /* slot enabling and address device helpers */ struct completion addr_dev; int slot_id; @@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, static inline void xhci_writel(struct xhci_hcd *xhci, const unsigned int val, __u32 __iomem *regs) { - if (!in_interrupt()) - xhci_dbg(xhci, - "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", - regs, val); + xhci_dbg(xhci, + "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", + regs, val); writel(val, regs); } +/* + * Registers should always be accessed with double word or quad word accesses. + * + * Some xHCI implementations may support 64-bit address pointers. Registers + * with 64-bit address pointers should be written to with dword accesses by + * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. + * xHCI implementations that do not support 64-bit address pointers will ignore + * the high dword, and write order is irrelevant. + */ +static inline u64 xhci_read_64(const struct xhci_hcd *xhci, + __u64 __iomem *regs) +{ + __u32 __iomem *ptr = (__u32 __iomem *) regs; + u64 val_lo = readl(ptr); + u64 val_hi = readl(ptr + 1); + return val_lo + (val_hi << 32); +} +static inline void xhci_write_64(struct xhci_hcd *xhci, + const u64 val, __u64 __iomem *regs) +{ + __u32 __iomem *ptr = (__u32 __iomem *) regs; + u32 val_lo = lower_32_bits(val); + u32 val_hi = upper_32_bits(val); + + xhci_dbg(xhci, + "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n", + regs, (long unsigned int) val); + writel(val_lo, ptr); + writel(val_hi, ptr + 1); +} + /* xHCI debugging */ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); void xhci_print_registers(struct xhci_hcd *xhci); @@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); -void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); +void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); /* xHCI memory managment */ void xhci_mem_cleanup(struct xhci_hcd *xhci); @@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); +void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); @@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index); int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); +int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, + unsigned int ep_index); +void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, + unsigned 
int slot_id, unsigned int ep_index, + struct xhci_td *cur_td, struct xhci_dequeue_state *state); +void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, + struct xhci_ring *ep_ring, unsigned int slot_id, + unsigned int ep_index, struct xhci_dequeue_state *deq_state); /* xHCI roothub code */ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); +/* xHCI contexts */ +struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); + #endif /* __LINUX_XHCI_HCD_H */ diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index a68d91a11be..abe3aa67ed0 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -220,7 +220,7 @@ config USB_IOWARRIOR config USB_TEST tristate "USB testing driver" - depends on USB && USB_DEVICEFS + depends on USB help This driver is for testing host controller software. It is used with specialized device firmware for regression and stress testing, diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 3c5fe5cee05..90e1a8dedfa 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/smp_lock.h> #include <linux/poll.h> #include <linux/usb/iowarrior.h> diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index deb95bb49fd..d645f3899fe 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index e0ff9ccd866..29092b8e59c 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/mutex.h> #include <asm/uaccess.h> diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f8d9045d668..0f7a30b7d2d 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1261,7 +1261,7 @@ static int mon_alloc_buff(struct mon_pgmap *map, int npages) return -ENOMEM; } map[n].ptr = (unsigned char *) vaddr; - map[n].pg = virt_to_page(vaddr); + map[n].pg = virt_to_page((void *) vaddr); } return 0; } diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 70073b157f0..803adcb5ac1 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -12,6 +12,7 @@ config USB_MUSB_HDRC depends on !SUPERH select NOP_USB_XCEIV if ARCH_DAVINCI select TWL4030_USB if MACH_OMAP_3430SDP + select NOP_USB_XCEIV if MACH_OMAP3EVM select USB_OTG_UTILS tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' help diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h index 8a39de3e6e4..59bf949e589 100644 --- a/drivers/usb/musb/cppi_dma.h +++ b/drivers/usb/musb/cppi_dma.h @@ -5,7 +5,6 @@ #include <linux/slab.h> #include <linux/list.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/dmapool.h> diff --git 
a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 180d7daa409..e16ff605c45 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -35,13 +35,14 @@ #include <mach/hardware.h> #include <mach/memory.h> #include <mach/gpio.h> +#include <mach/cputype.h> #include <asm/mach-types.h> #include "musb_core.h" #ifdef CONFIG_MACH_DAVINCI_EVM -#define GPIO_nVBUS_DRV 87 +#define GPIO_nVBUS_DRV 144 #endif #include "davinci.h" @@ -329,7 +330,6 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); WARNING("VBUS error workaround (delay coming)\n"); } else if (is_host_enabled(musb) && drvvbus) { - musb->is_active = 1; MUSB_HST_MODE(musb); musb->xceiv->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; @@ -343,7 +343,9 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci) portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); } - /* NOTE: this must complete poweron within 100 msec */ + /* NOTE: this must complete poweron within 100 msec + * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. + */ davinci_source_power(musb, drvvbus, 0); DBG(2, "VBUS %s (%s)%s, devctl %02x\n", drvvbus ? "on" : "off", @@ -411,6 +413,21 @@ int __init musb_platform_init(struct musb *musb) __raw_writel(phy_ctrl, USB_PHY_CTRL); } + /* On dm355, the default-A state machine needs DRVVBUS control. + * If we won't be a host, there's no need to turn it on. + */ + if (cpu_is_davinci_dm355()) { + u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); + + if (is_host_enabled(musb)) { + deepsleep &= ~DRVVBUS_OVERRIDE; + } else { + deepsleep &= ~DRVVBUS_FORCE; + deepsleep |= DRVVBUS_OVERRIDE; + } + __raw_writel(deepsleep, DM355_DEEPSLEEP); + } + /* reset the controller */ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); @@ -437,6 +454,15 @@ int musb_platform_exit(struct musb *musb) if (is_host_enabled(musb)) del_timer_sync(&otg_workaround); + /* force VBUS off */ + if (cpu_is_davinci_dm355()) { + u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); + + deepsleep &= ~DRVVBUS_FORCE; + deepsleep |= DRVVBUS_OVERRIDE; + __raw_writel(deepsleep, DM355_DEEPSLEEP); + } + davinci_source_power(musb, 0 /*off*/, 1); /* delay, to avoid problems with module reload */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 554a414f65d..c7c1ca0494c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) int i; /* log core options (read using indexed model) */ - musb_ep_select(mbase, 0); reg = musb_read_configdata(mbase); strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? 
"UTMI-16" : "UTMI-8"); @@ -1990,7 +1989,7 @@ bad_config: if (status < 0) goto fail2; -#ifdef CONFIG_USB_OTG +#ifdef CONFIG_USB_MUSB_OTG setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); #endif diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index f3772ca3b2c..381d648a36b 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -38,7 +38,6 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/interrupt.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/clk.h> diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 40ed50ecedf..7a6778675ad 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -407,7 +407,7 @@ stall: csr |= MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG - | MUSB_TXCSR_P_WZC_BITS; + | MUSB_RXCSR_P_WZC_BITS; musb_writew(regs, MUSB_RXCSR, csr); } diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 94a2a350a41..cf94511485f 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -373,7 +373,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, musb_save_toggle(qh, is_in, urb); break; case USB_ENDPOINT_XFER_ISOC: - if (urb->error_count) + if (status == 0 && urb->error_count) status = -EXDEV; break; } @@ -2235,13 +2235,30 @@ static void musb_h_stop(struct usb_hcd *hcd) static int musb_bus_suspend(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); + u8 devctl; - if (musb->xceiv->state == OTG_STATE_A_SUSPEND) + if (!is_host_active(musb)) return 0; - if (is_host_active(musb) && musb->is_active) { - WARNING("trying to suspend as %s is_active=%i\n", - otg_state_string(musb), musb->is_active); + switch (musb->xceiv->state) { + case OTG_STATE_A_SUSPEND: + return 0; + case OTG_STATE_A_WAIT_VRISE: + /* ID could be grounded even if there's no device + * on the other end of the cable. NOTE that the + * A_WAIT_VRISE timers are messy with MUSB... + */ + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->xceiv->state = OTG_STATE_A_WAIT_BCON; + break; + default: + break; + } + + if (musb->is_active) { + WARNING("trying to suspend as %s while active\n", + otg_state_string(musb)); return -EBUSY; } else return 0; diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index de3b2f18db4..fbfd3fd9ce1 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) static inline u8 musb_read_configdata(void __iomem *mbase) { + musb_writeb(mbase, MUSB_INDEX, 0); return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); } diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index 69feeec1628..aa884d072f0 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -59,18 +59,4 @@ config NOP_USB_XCEIV built-in with usb ip or which are autonomous and doesn't require any phy programming such as ISP1x04 etc. -config USB_LANGWELL_OTG - tristate "Intel Langwell USB OTG dual-role support" - depends on USB && MRST - select USB_OTG - select USB_OTG_UTILS - help - Say Y here if you want to build Intel Langwell USB OTG - transciever driver in kernel. This driver implements role - switch between EHCI host driver and Langwell USB OTG - client driver. 
- - To compile this driver as a module, choose M here: the - module will be called langwell_otg. - endif # USB || OTG diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile index 6d1abdd3c0a..20816785652 100644 --- a/drivers/usb/otg/Makefile +++ b/drivers/usb/otg/Makefile @@ -9,7 +9,6 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o -obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c deleted file mode 100644 index 6f628d0e9f3..00000000000 --- a/drivers/usb/otg/langwell_otg.c +++ /dev/null @@ -1,1915 +0,0 @@ -/* - * Intel Langwell USB OTG transceiver driver - * Copyright (C) 2008 - 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - */ -/* This driver helps to switch Langwell OTG controller function between host - * and peripheral. It works with EHCI driver and Langwell client controller - * driver together. - */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/moduleparam.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> -#include <linux/usb.h> -#include <linux/usb/otg.h> -#include <linux/notifier.h> -#include <asm/ipc_defs.h> -#include <linux/delay.h> -#include "../core/hcd.h" - -#include <linux/usb/langwell_otg.h> - -#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver" -#define DRIVER_VERSION "3.0.0.32L.0002" - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>"); -MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); - -static const char driver_name[] = "langwell_otg"; - -static int langwell_otg_probe(struct pci_dev *pdev, - const struct pci_device_id *id); -static void langwell_otg_remove(struct pci_dev *pdev); -static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message); -static int langwell_otg_resume(struct pci_dev *pdev); - -static int langwell_otg_set_host(struct otg_transceiver *otg, - struct usb_bus *host); -static int langwell_otg_set_peripheral(struct otg_transceiver *otg, - struct usb_gadget *gadget); -static int langwell_otg_start_srp(struct otg_transceiver *otg); - -static const struct pci_device_id pci_ids[] = {{ - .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), - .class_mask = ~0, - .vendor = 0x8086, - .device = 0x0811, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, -}, { /* end: all zeroes */ } -}; - -static struct pci_driver otg_pci_driver = { - .name = (char *) driver_name, - .id_table = pci_ids, - - .probe = langwell_otg_probe, - .remove = langwell_otg_remove, - - .suspend = 
langwell_otg_suspend, - .resume = langwell_otg_resume, -}; - -static const char *state_string(enum usb_otg_state state) -{ - switch (state) { - case OTG_STATE_A_IDLE: - return "a_idle"; - case OTG_STATE_A_WAIT_VRISE: - return "a_wait_vrise"; - case OTG_STATE_A_WAIT_BCON: - return "a_wait_bcon"; - case OTG_STATE_A_HOST: - return "a_host"; - case OTG_STATE_A_SUSPEND: - return "a_suspend"; - case OTG_STATE_A_PERIPHERAL: - return "a_peripheral"; - case OTG_STATE_A_WAIT_VFALL: - return "a_wait_vfall"; - case OTG_STATE_A_VBUS_ERR: - return "a_vbus_err"; - case OTG_STATE_B_IDLE: - return "b_idle"; - case OTG_STATE_B_SRP_INIT: - return "b_srp_init"; - case OTG_STATE_B_PERIPHERAL: - return "b_peripheral"; - case OTG_STATE_B_WAIT_ACON: - return "b_wait_acon"; - case OTG_STATE_B_HOST: - return "b_host"; - default: - return "UNDEFINED"; - } -} - -/* HSM timers */ -static inline struct langwell_otg_timer *otg_timer_initializer -(void (*function)(unsigned long), unsigned long expires, unsigned long data) -{ - struct langwell_otg_timer *timer; - timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL); - timer->function = function; - timer->expires = expires; - timer->data = data; - return timer; -} - -static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr, - *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr, - *b_bus_suspend_tmr; - -static struct list_head active_timers; - -static struct langwell_otg *the_transceiver; - -/* host/client notify transceiver when event affects HNP state */ -void langwell_update_transceiver() -{ - otg_dbg("transceiver driver is notified\n"); - queue_work(the_transceiver->qwork, &the_transceiver->work); -} -EXPORT_SYMBOL(langwell_update_transceiver); - -static int langwell_otg_set_host(struct otg_transceiver *otg, - struct usb_bus *host) -{ - otg->host = host; - - return 0; -} - -static int langwell_otg_set_peripheral(struct otg_transceiver *otg, - struct usb_gadget *gadget) -{ - otg->gadget = gadget; - - return 0; -} - -static int langwell_otg_set_power(struct otg_transceiver *otg, - unsigned mA) -{ - return 0; -} - -/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/ -static void langwell_otg_drv_vbus(int on) -{ - struct ipc_pmic_reg_data pmic_data = {0}; - struct ipc_pmic_reg_data battery_data; - - /* Check if battery is attached or not */ - battery_data.pmic_reg_data[0].register_address = 0xd2; - battery_data.ioc = 0; - battery_data.num_entries = 1; - if (ipc_pmic_register_read(&battery_data)) { - otg_dbg("Failed to read PMIC register 0xd2.\n"); - return; - } - - if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) { - otg_dbg("no battery attached\n"); - return; - } - - /* Workaround for battery attachment issue */ - if (battery_data.pmic_reg_data[0].value == 0x34) { - otg_dbg("battery \n"); - return; - } - - otg_dbg("battery attached\n"); - - pmic_data.ioc = 0; - pmic_data.pmic_reg_data[0].register_address = 0xD4; - pmic_data.num_entries = 1; - if (on) - pmic_data.pmic_reg_data[0].value = 0x20; - else - pmic_data.pmic_reg_data[0].value = 0xc0; - - if (ipc_pmic_register_write(&pmic_data, TRUE)) - otg_dbg("Failed to write PMIC.\n"); - -} - -/* charge vbus or discharge vbus through a resistor to ground */ -static void langwell_otg_chrg_vbus(int on) -{ - - u32 val; - - val = readl(the_transceiver->regs + CI_OTGSC); - - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC, - the_transceiver->regs + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD, - the_transceiver->regs + CI_OTGSC); - -} - -/* Start 
SRP */ -static int langwell_otg_start_srp(struct otg_transceiver *otg) -{ - u32 val; - - otg_dbg("Start SRP ->\n"); - - val = readl(the_transceiver->regs + CI_OTGSC); - - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP, - the_transceiver->regs + CI_OTGSC); - - /* Check if the data plus is finished or not */ - msleep(8); - val = readl(the_transceiver->regs + CI_OTGSC); - if (val & (OTGSC_HADP | OTGSC_DP)) - otg_dbg("DataLine SRP Error\n"); - - /* FIXME: VBus SRP */ - - return 0; -} - - -/* stop SOF via bus_suspend */ -static void langwell_otg_loc_sof(int on) -{ - struct usb_hcd *hcd; - int err; - - otg_dbg("loc_sof -> %d\n", on); - - hcd = bus_to_hcd(the_transceiver->otg.host); - if (on) - err = hcd->driver->bus_resume(hcd); - else - err = hcd->driver->bus_suspend(hcd); - - if (err) - otg_dbg("Failed to resume/suspend bus - %d\n", err); -} - -static void langwell_otg_phy_low_power(int on) -{ - u32 val; - - otg_dbg("phy low power mode-> %d\n", on); - - val = readl(the_transceiver->regs + CI_HOSTPC1); - if (on) - writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1); - else - writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1); -} - -/* Enable/Disable OTG interrupt */ -static void langwell_otg_intr(int on) -{ - u32 val; - - otg_dbg("interrupt -> %d\n", on); - - val = readl(the_transceiver->regs + CI_OTGSC); - if (on) { - val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU); - writel(val, the_transceiver->regs + CI_OTGSC); - } else { - val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU); - writel(val, the_transceiver->regs + CI_OTGSC); - } -} - -/* set HAAR: Hardware Assist Auto-Reset */ -static void langwell_otg_HAAR(int on) -{ - u32 val; - - otg_dbg("HAAR -> %d\n", on); - - val = readl(the_transceiver->regs + CI_OTGSC); - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR, - the_transceiver->regs + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR, - the_transceiver->regs + CI_OTGSC); -} - -/* set HABA: Hardware Assist B-Disconnect to A-Connect */ -static void langwell_otg_HABA(int on) -{ - u32 val; - - otg_dbg("HABA -> %d\n", on); - - val = readl(the_transceiver->regs + CI_OTGSC); - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA, - the_transceiver->regs + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA, - the_transceiver->regs + CI_OTGSC); -} - -static int langwell_otg_check_se0_srp(int on) -{ - u32 val; - - int delay_time = TB_SE0_SRP * 10; /* step is 100us */ - - otg_dbg("check_se0_srp -> \n"); - - do { - udelay(100); - if (!delay_time--) - break; - val = readl(the_transceiver->regs + CI_PORTSC1); - val &= PORTSC_LS; - } while (!val); - - otg_dbg("check_se0_srp <- \n"); - return val; -} - -/* The timeout callback function to set time out bit */ -static void set_tmout(unsigned long indicator) -{ - *(int *)indicator = 1; -} - -void langwell_otg_nsf_msg(unsigned long indicator) -{ - switch (indicator) { - case 2: - case 4: - case 6: - case 7: - printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n", - indicator); - break; - case 3: - printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n", - indicator); - break; - default: - printk(KERN_ERR "Do not have this kind of NSF\n"); - break; - } -} - -/* Initialize timers */ -static void langwell_otg_init_timers(struct otg_hsm *hsm) -{ - /* HSM used timers */ - a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE, - (unsigned long)&hsm->a_wait_vrise_tmout); - a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON, - (unsigned long)&hsm->a_wait_bcon_tmout); - 
a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS, - (unsigned long)&hsm->a_aidl_bdis_tmout); - b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST, - (unsigned long)&hsm->b_ase0_brst_tmout); - b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP, - (unsigned long)&hsm->b_se0_srp); - b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES, - (unsigned long)&hsm->b_srp_res_tmout); - b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND, - (unsigned long)&hsm->b_bus_suspend_tmout); -} - -/* Free timers */ -static void langwell_otg_free_timers(void) -{ - kfree(a_wait_vrise_tmr); - kfree(a_wait_bcon_tmr); - kfree(a_aidl_bdis_tmr); - kfree(b_ase0_brst_tmr); - kfree(b_se0_srp_tmr); - kfree(b_srp_res_tmr); - kfree(b_bus_suspend_tmr); -} - -/* Add timer to timer list */ -static void langwell_otg_add_timer(void *gtimer) -{ - struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer; - struct langwell_otg_timer *tmp_timer; - u32 val32; - - /* Check if the timer is already in the active list, - * if so update timer count - */ - list_for_each_entry(tmp_timer, &active_timers, list) - if (tmp_timer == timer) { - timer->count = timer->expires; - return; - } - timer->count = timer->expires; - - if (list_empty(&active_timers)) { - val32 = readl(the_transceiver->regs + CI_OTGSC); - writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC); - } - - list_add_tail(&timer->list, &active_timers); -} - -/* Remove timer from the timer list; clear timeout status */ -static void langwell_otg_del_timer(void *gtimer) -{ - struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer; - struct langwell_otg_timer *tmp_timer, *del_tmp; - u32 val32; - - list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) - if (tmp_timer == timer) - list_del(&timer->list); - - if (list_empty(&active_timers)) { - val32 = readl(the_transceiver->regs + CI_OTGSC); - writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC); - } -} - -/* Reduce timer count by 1, and find timeout conditions.*/ -static int langwell_otg_tick_timer(u32 *int_sts) -{ - struct langwell_otg_timer *tmp_timer, *del_tmp; - int expired = 0; - - list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) { - tmp_timer->count--; - /* check if timer expires */ - if (!tmp_timer->count) { - list_del(&tmp_timer->list); - tmp_timer->function(tmp_timer->data); - expired = 1; - } - } - - if (list_empty(&active_timers)) { - otg_dbg("tick timer: disable 1ms int\n"); - *int_sts = *int_sts & ~OTGSC_1MSE; - } - return expired; -} - -static void reset_otg(void) -{ - u32 val; - int delay_time = 1000; - - otg_dbg("reseting OTG controller ...\n"); - val = readl(the_transceiver->regs + CI_USBCMD); - writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD); - do { - udelay(100); - if (!delay_time--) - otg_dbg("reset timeout\n"); - val = readl(the_transceiver->regs + CI_USBCMD); - val &= USBCMD_RST; - } while (val != 0); - otg_dbg("reset done.\n"); -} - -static void set_host_mode(void) -{ - u32 val; - - reset_otg(); - val = readl(the_transceiver->regs + CI_USBMODE); - val = (val & (~USBMODE_CM)) | USBMODE_HOST; - writel(val, the_transceiver->regs + CI_USBMODE); -} - -static void set_client_mode(void) -{ - u32 val; - - reset_otg(); - val = readl(the_transceiver->regs + CI_USBMODE); - val = (val & (~USBMODE_CM)) | USBMODE_DEVICE; - writel(val, the_transceiver->regs + CI_USBMODE); -} - -static void init_hsm(void) -{ - struct langwell_otg *langwell = the_transceiver; - u32 val32; - - /* 
read OTGSC after reset */ - val32 = readl(langwell->regs + CI_OTGSC); - otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32); - - /* set init state */ - if (val32 & OTGSC_ID) { - langwell->hsm.id = 1; - langwell->otg.default_a = 0; - set_client_mode(); - langwell->otg.state = OTG_STATE_B_IDLE; - langwell_otg_drv_vbus(0); - } else { - langwell->hsm.id = 0; - langwell->otg.default_a = 1; - set_host_mode(); - langwell->otg.state = OTG_STATE_A_IDLE; - } - - /* set session indicator */ - if (val32 & OTGSC_BSE) - langwell->hsm.b_sess_end = 1; - if (val32 & OTGSC_BSV) - langwell->hsm.b_sess_vld = 1; - if (val32 & OTGSC_ASV) - langwell->hsm.a_sess_vld = 1; - if (val32 & OTGSC_AVV) - langwell->hsm.a_vbus_vld = 1; - - /* defautly power the bus */ - langwell->hsm.a_bus_req = 1; - langwell->hsm.a_bus_drop = 0; - /* defautly don't request bus as B device */ - langwell->hsm.b_bus_req = 0; - /* no system error */ - langwell->hsm.a_clr_err = 0; -} - -static irqreturn_t otg_dummy_irq(int irq, void *_dev) -{ - void __iomem *reg_base = _dev; - u32 val; - u32 int_mask = 0; - - val = readl(reg_base + CI_USBMODE); - if ((val & USBMODE_CM) != USBMODE_DEVICE) - return IRQ_NONE; - - val = readl(reg_base + CI_USBSTS); - int_mask = val & INTR_DUMMY_MASK; - - if (int_mask == 0) - return IRQ_NONE; - - /* clear hsm.b_conn here since host driver can't detect it - * otg_dummy_irq called means B-disconnect happened. - */ - if (the_transceiver->hsm.b_conn) { - the_transceiver->hsm.b_conn = 0; - if (spin_trylock(&the_transceiver->wq_lock)) { - queue_work(the_transceiver->qwork, - &the_transceiver->work); - spin_unlock(&the_transceiver->wq_lock); - } - } - /* Clear interrupts */ - writel(int_mask, reg_base + CI_USBSTS); - return IRQ_HANDLED; -} - -static irqreturn_t otg_irq(int irq, void *_dev) -{ - struct langwell_otg *langwell = _dev; - u32 int_sts, int_en; - u32 int_mask = 0; - int flag = 0; - - int_sts = readl(langwell->regs + CI_OTGSC); - int_en = (int_sts & OTGSC_INTEN_MASK) >> 8; - int_mask = int_sts & int_en; - if (int_mask == 0) - return IRQ_NONE; - - if (int_mask & OTGSC_IDIS) { - otg_dbg("%s: id change int\n", __func__); - langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0; - flag = 1; - } - if (int_mask & OTGSC_DPIS) { - otg_dbg("%s: data pulse int\n", __func__); - langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0; - flag = 1; - } - if (int_mask & OTGSC_BSEIS) { - otg_dbg("%s: b session end int\n", __func__); - langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0; - flag = 1; - } - if (int_mask & OTGSC_BSVIS) { - otg_dbg("%s: b session valid int\n", __func__); - langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0; - flag = 1; - } - if (int_mask & OTGSC_ASVIS) { - otg_dbg("%s: a session valid int\n", __func__); - langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0; - flag = 1; - } - if (int_mask & OTGSC_AVVIS) { - otg_dbg("%s: a vbus valid int\n", __func__); - langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 
1 : 0; - flag = 1; - } - - if (int_mask & OTGSC_1MSS) { - /* need to schedule otg_work if any timer is expired */ - if (langwell_otg_tick_timer(&int_sts)) - flag = 1; - } - - writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask, - langwell->regs + CI_OTGSC); - if (flag) - queue_work(langwell->qwork, &langwell->work); - - return IRQ_HANDLED; -} - -static void langwell_otg_work(struct work_struct *work) -{ - struct langwell_otg *langwell = container_of(work, - struct langwell_otg, work); - int retval; - - otg_dbg("%s: old state = %s\n", __func__, - state_string(langwell->otg.state)); - - switch (langwell->otg.state) { - case OTG_STATE_UNDEFINED: - case OTG_STATE_B_IDLE: - if (!langwell->hsm.id) { - langwell_otg_del_timer(b_srp_res_tmr); - langwell->otg.default_a = 1; - langwell->hsm.a_srp_det = 0; - - langwell_otg_chrg_vbus(0); - langwell_otg_drv_vbus(0); - - set_host_mode(); - langwell->otg.state = OTG_STATE_A_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.b_srp_res_tmout) { - langwell->hsm.b_srp_res_tmout = 0; - langwell->hsm.b_bus_req = 0; - langwell_otg_nsf_msg(6); - } else if (langwell->hsm.b_sess_vld) { - langwell_otg_del_timer(b_srp_res_tmr); - langwell->hsm.b_sess_end = 0; - langwell->hsm.a_bus_suspend = 0; - - langwell_otg_chrg_vbus(0); - if (langwell->client_ops) { - langwell->client_ops->resume(langwell->pdev); - langwell->otg.state = OTG_STATE_B_PERIPHERAL; - } else - otg_dbg("client driver not loaded.\n"); - - } else if (langwell->hsm.b_bus_req && - (langwell->hsm.b_sess_end)) { - /* workaround for b_se0_srp detection */ - retval = langwell_otg_check_se0_srp(0); - if (retval) { - langwell->hsm.b_bus_req = 0; - otg_dbg("LS is not SE0, try again later\n"); - } else { - /* Start SRP */ - langwell_otg_start_srp(&langwell->otg); - langwell_otg_add_timer(b_srp_res_tmr); - } - } - break; - case OTG_STATE_B_SRP_INIT: - if (!langwell->hsm.id) { - langwell->otg.default_a = 1; - langwell->hsm.a_srp_det = 0; - - langwell_otg_drv_vbus(0); - langwell_otg_chrg_vbus(0); - - langwell->otg.state = OTG_STATE_A_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.b_sess_vld) { - langwell_otg_chrg_vbus(0); - if (langwell->client_ops) { - langwell->client_ops->resume(langwell->pdev); - langwell->otg.state = OTG_STATE_B_PERIPHERAL; - } else - otg_dbg("client driver not loaded.\n"); - } - break; - case OTG_STATE_B_PERIPHERAL: - if (!langwell->hsm.id) { - langwell->otg.default_a = 1; - langwell->hsm.a_srp_det = 0; - - langwell_otg_drv_vbus(0); - langwell_otg_chrg_vbus(0); - set_host_mode(); - - if (langwell->client_ops) { - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - } else - otg_dbg("client driver has been removed.\n"); - - langwell->otg.state = OTG_STATE_A_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.b_sess_vld) { - langwell->hsm.b_hnp_enable = 0; - - if (langwell->client_ops) { - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - } else - otg_dbg("client driver has been removed.\n"); - - langwell->otg.state = OTG_STATE_B_IDLE; - } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable - && langwell->hsm.a_bus_suspend) { - - if (langwell->client_ops) { - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - } else - otg_dbg("client driver has been removed.\n"); - - langwell_otg_HAAR(1); - langwell->hsm.a_conn = 0; - - if (langwell->host_ops) { - langwell->host_ops->probe(langwell->pdev, - langwell->host_ops->id_table); - langwell->otg.state = 
OTG_STATE_B_WAIT_ACON; - } else - otg_dbg("host driver not loaded.\n"); - - langwell->hsm.a_bus_resume = 0; - langwell->hsm.b_ase0_brst_tmout = 0; - langwell_otg_add_timer(b_ase0_brst_tmr); - } - break; - - case OTG_STATE_B_WAIT_ACON: - if (!langwell->hsm.id) { - langwell_otg_del_timer(b_ase0_brst_tmr); - langwell->otg.default_a = 1; - langwell->hsm.a_srp_det = 0; - - langwell_otg_drv_vbus(0); - langwell_otg_chrg_vbus(0); - set_host_mode(); - - langwell_otg_HAAR(0); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->otg.state = OTG_STATE_A_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.b_sess_vld) { - langwell_otg_del_timer(b_ase0_brst_tmr); - langwell->hsm.b_hnp_enable = 0; - langwell->hsm.b_bus_req = 0; - langwell_otg_chrg_vbus(0); - langwell_otg_HAAR(0); - - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->otg.state = OTG_STATE_B_IDLE; - } else if (langwell->hsm.a_conn) { - langwell_otg_del_timer(b_ase0_brst_tmr); - langwell_otg_HAAR(0); - langwell->otg.state = OTG_STATE_B_HOST; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_bus_resume || - langwell->hsm.b_ase0_brst_tmout) { - langwell_otg_del_timer(b_ase0_brst_tmr); - langwell_otg_HAAR(0); - langwell_otg_nsf_msg(7); - - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - - langwell->hsm.a_bus_suspend = 0; - langwell->hsm.b_bus_req = 0; - - if (langwell->client_ops) - langwell->client_ops->resume(langwell->pdev); - else - otg_dbg("client driver not loaded.\n"); - - langwell->otg.state = OTG_STATE_B_PERIPHERAL; - } - break; - - case OTG_STATE_B_HOST: - if (!langwell->hsm.id) { - langwell->otg.default_a = 1; - langwell->hsm.a_srp_det = 0; - - langwell_otg_drv_vbus(0); - langwell_otg_chrg_vbus(0); - set_host_mode(); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->otg.state = OTG_STATE_A_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.b_sess_vld) { - langwell->hsm.b_hnp_enable = 0; - langwell->hsm.b_bus_req = 0; - langwell_otg_chrg_vbus(0); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->otg.state = OTG_STATE_B_IDLE; - } else if ((!langwell->hsm.b_bus_req) || - (!langwell->hsm.a_conn)) { - langwell->hsm.b_bus_req = 0; - langwell_otg_loc_sof(0); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - - langwell->hsm.a_bus_suspend = 0; - - if (langwell->client_ops) - langwell->client_ops->resume(langwell->pdev); - else - otg_dbg("client driver not loaded.\n"); - - langwell->otg.state = OTG_STATE_B_PERIPHERAL; - } - break; - - case OTG_STATE_A_IDLE: - langwell->otg.default_a = 1; - if (langwell->hsm.id) { - langwell->otg.default_a = 0; - langwell->hsm.b_bus_req = 0; - langwell_otg_drv_vbus(0); - langwell_otg_chrg_vbus(0); - - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_sess_vld) { - langwell_otg_drv_vbus(1); - langwell->hsm.a_srp_det = 1; - langwell->hsm.a_wait_vrise_tmout = 0; - langwell_otg_add_timer(a_wait_vrise_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_VRISE; - 
queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.a_bus_drop && - (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) { - langwell_otg_drv_vbus(1); - langwell->hsm.a_wait_vrise_tmout = 0; - langwell_otg_add_timer(a_wait_vrise_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_VRISE; - queue_work(langwell->qwork, &langwell->work); - } - break; - case OTG_STATE_A_WAIT_VRISE: - if (langwell->hsm.id) { - langwell_otg_del_timer(a_wait_vrise_tmr); - langwell->hsm.b_bus_req = 0; - langwell->otg.default_a = 0; - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_B_IDLE; - } else if (langwell->hsm.a_vbus_vld) { - langwell_otg_del_timer(a_wait_vrise_tmr); - if (langwell->host_ops) - langwell->host_ops->probe(langwell->pdev, - langwell->host_ops->id_table); - else - otg_dbg("host driver not loaded.\n"); - langwell->hsm.b_conn = 0; - langwell->hsm.a_set_b_hnp_en = 0; - langwell->hsm.a_wait_bcon_tmout = 0; - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_BCON; - } else if (langwell->hsm.a_wait_vrise_tmout) { - if (langwell->hsm.a_vbus_vld) { - if (langwell->host_ops) - langwell->host_ops->probe( - langwell->pdev, - langwell->host_ops->id_table); - else - otg_dbg("host driver not loaded.\n"); - langwell->hsm.b_conn = 0; - langwell->hsm.a_set_b_hnp_en = 0; - langwell->hsm.a_wait_bcon_tmout = 0; - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_BCON; - } else { - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_VBUS_ERR; - } - } - break; - case OTG_STATE_A_WAIT_BCON: - if (langwell->hsm.id) { - langwell_otg_del_timer(a_wait_bcon_tmr); - - langwell->otg.default_a = 0; - langwell->hsm.b_bus_req = 0; - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.a_vbus_vld) { - langwell_otg_del_timer(a_wait_bcon_tmr); - - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (langwell->hsm.a_bus_drop || - (langwell->hsm.a_wait_bcon_tmout && - !langwell->hsm.a_bus_req)) { - langwell_otg_del_timer(a_wait_bcon_tmr); - - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (langwell->hsm.b_conn) { - langwell_otg_del_timer(a_wait_bcon_tmr); - - langwell->hsm.a_suspend_req = 0; - langwell->otg.state = OTG_STATE_A_HOST; - if (!langwell->hsm.a_bus_req && - langwell->hsm.a_set_b_hnp_en) { - /* It is not safe enough to do a fast - * transistion from A_WAIT_BCON to - * A_SUSPEND */ - msleep(10000); - if (langwell->hsm.a_bus_req) - break; - - if (request_irq(langwell->pdev->irq, - otg_dummy_irq, IRQF_SHARED, - driver_name, langwell->regs) != 0) { - otg_dbg("request interrupt %d fail\n", - langwell->pdev->irq); - } - - langwell_otg_HABA(1); - langwell->hsm.b_bus_resume = 0; - langwell->hsm.a_aidl_bdis_tmout = 0; - langwell_otg_add_timer(a_aidl_bdis_tmr); - - langwell_otg_loc_sof(0); - langwell->otg.state = OTG_STATE_A_SUSPEND; - } else if (!langwell->hsm.a_bus_req && - !langwell->hsm.a_set_b_hnp_en) { - struct pci_dev *pdev = langwell->pdev; - if (langwell->host_ops) - 
langwell->host_ops->remove(pdev); - else - otg_dbg("host driver removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - } - } - break; - case OTG_STATE_A_HOST: - if (langwell->hsm.id) { - langwell->otg.default_a = 0; - langwell->hsm.b_bus_req = 0; - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_bus_drop || - (!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) { - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (!langwell->hsm.a_vbus_vld) { - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (langwell->hsm.a_set_b_hnp_en - && !langwell->hsm.a_bus_req) { - /* Set HABA to enable hardware assistance to signal - * A-connect after receiver B-disconnect. Hardware - * will then set client mode and enable URE, SLE and - * PCE after the assistance. otg_dummy_irq is used to - * clean these ints when client driver is not resumed. - */ - if (request_irq(langwell->pdev->irq, - otg_dummy_irq, IRQF_SHARED, driver_name, - langwell->regs) != 0) { - otg_dbg("request interrupt %d failed\n", - langwell->pdev->irq); - } - - /* set HABA */ - langwell_otg_HABA(1); - langwell->hsm.b_bus_resume = 0; - langwell->hsm.a_aidl_bdis_tmout = 0; - langwell_otg_add_timer(a_aidl_bdis_tmr); - langwell_otg_loc_sof(0); - langwell->otg.state = OTG_STATE_A_SUSPEND; - } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) { - langwell->hsm.a_wait_bcon_tmout = 0; - langwell->hsm.a_set_b_hnp_en = 0; - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_BCON; - } - break; - case OTG_STATE_A_SUSPEND: - if (langwell->hsm.id) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(langwell->pdev->irq, langwell->regs); - langwell->otg.default_a = 0; - langwell->hsm.b_bus_req = 0; - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_bus_req || - langwell->hsm.b_bus_resume) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(langwell->pdev->irq, langwell->regs); - langwell->hsm.a_suspend_req = 0; - langwell_otg_loc_sof(1); - langwell->otg.state = OTG_STATE_A_HOST; - } else if (langwell->hsm.a_aidl_bdis_tmout || - langwell->hsm.a_bus_drop) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(langwell->pdev->irq, langwell->regs); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (!langwell->hsm.b_conn && - langwell->hsm.a_set_b_hnp_en) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(langwell->pdev->irq, langwell->regs); - - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been 
removed.\n"); - - langwell->hsm.b_bus_suspend = 0; - langwell->hsm.b_bus_suspend_vld = 0; - langwell->hsm.b_bus_suspend_tmout = 0; - - /* msleep(200); */ - if (langwell->client_ops) - langwell->client_ops->resume(langwell->pdev); - else - otg_dbg("client driver not loaded.\n"); - - langwell_otg_add_timer(b_bus_suspend_tmr); - langwell->otg.state = OTG_STATE_A_PERIPHERAL; - break; - } else if (!langwell->hsm.a_vbus_vld) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(langwell->pdev->irq, langwell->regs); - if (langwell->host_ops) - langwell->host_ops->remove(langwell->pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_VBUS_ERR; - } - break; - case OTG_STATE_A_PERIPHERAL: - if (langwell->hsm.id) { - langwell_otg_del_timer(b_bus_suspend_tmr); - langwell->otg.default_a = 0; - langwell->hsm.b_bus_req = 0; - if (langwell->client_ops) - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - else - otg_dbg("client driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (!langwell->hsm.a_vbus_vld) { - langwell_otg_del_timer(b_bus_suspend_tmr); - if (langwell->client_ops) - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - else - otg_dbg("client driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (langwell->hsm.a_bus_drop) { - langwell_otg_del_timer(b_bus_suspend_tmr); - if (langwell->client_ops) - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - else - otg_dbg("client driver has been removed.\n"); - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (langwell->hsm.b_bus_suspend) { - langwell_otg_del_timer(b_bus_suspend_tmr); - if (langwell->client_ops) - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - else - otg_dbg("client driver has been removed.\n"); - - if (langwell->host_ops) - langwell->host_ops->probe(langwell->pdev, - langwell->host_ops->id_table); - else - otg_dbg("host driver not loaded.\n"); - langwell->hsm.a_set_b_hnp_en = 0; - langwell->hsm.a_wait_bcon_tmout = 0; - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_BCON; - } else if (langwell->hsm.b_bus_suspend_tmout) { - u32 val; - val = readl(langwell->regs + CI_PORTSC1); - if (!(val & PORTSC_SUSP)) - break; - if (langwell->client_ops) - langwell->client_ops->suspend(langwell->pdev, - PMSG_FREEZE); - else - otg_dbg("client driver has been removed.\n"); - if (langwell->host_ops) - langwell->host_ops->probe(langwell->pdev, - langwell->host_ops->id_table); - else - otg_dbg("host driver not loaded.\n"); - langwell->hsm.a_set_b_hnp_en = 0; - langwell->hsm.a_wait_bcon_tmout = 0; - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_BCON; - } - break; - case OTG_STATE_A_VBUS_ERR: - if (langwell->hsm.id) { - langwell->otg.default_a = 0; - langwell->hsm.a_clr_err = 0; - langwell->hsm.a_srp_det = 0; - langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_clr_err) { - langwell->hsm.a_clr_err = 0; - langwell->hsm.a_srp_det = 0; - reset_otg(); - init_hsm(); - if (langwell->otg.state == OTG_STATE_A_IDLE) - queue_work(langwell->qwork, &langwell->work); - } - break; - case OTG_STATE_A_WAIT_VFALL: - if (langwell->hsm.id) { - langwell->otg.default_a = 0; - 
langwell->otg.state = OTG_STATE_B_IDLE; - queue_work(langwell->qwork, &langwell->work); - } else if (langwell->hsm.a_bus_req) { - langwell_otg_drv_vbus(1); - langwell->hsm.a_wait_vrise_tmout = 0; - langwell_otg_add_timer(a_wait_vrise_tmr); - langwell->otg.state = OTG_STATE_A_WAIT_VRISE; - } else if (!langwell->hsm.a_sess_vld) { - langwell->hsm.a_srp_det = 0; - langwell_otg_drv_vbus(0); - set_host_mode(); - langwell->otg.state = OTG_STATE_A_IDLE; - } - break; - default: - ; - } - - otg_dbg("%s: new state = %s\n", __func__, - state_string(langwell->otg.state)); -} - - static ssize_t -show_registers(struct device *_dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *langwell; - char *next; - unsigned size; - unsigned t; - - langwell = the_transceiver; - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, - "\n" - "USBCMD = 0x%08x \n" - "USBSTS = 0x%08x \n" - "USBINTR = 0x%08x \n" - "ASYNCLISTADDR = 0x%08x \n" - "PORTSC1 = 0x%08x \n" - "HOSTPC1 = 0x%08x \n" - "OTGSC = 0x%08x \n" - "USBMODE = 0x%08x \n", - readl(langwell->regs + 0x30), - readl(langwell->regs + 0x34), - readl(langwell->regs + 0x38), - readl(langwell->regs + 0x48), - readl(langwell->regs + 0x74), - readl(langwell->regs + 0xb4), - readl(langwell->regs + 0xf4), - readl(langwell->regs + 0xf8) - ); - size -= t; - next += t; - - return PAGE_SIZE - size; -} -static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL); - -static ssize_t -show_hsm(struct device *_dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *langwell; - char *next; - unsigned size; - unsigned t; - - langwell = the_transceiver; - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, - "\n" - "current state = %s\n" - "a_bus_resume = \t%d\n" - "a_bus_suspend = \t%d\n" - "a_conn = \t%d\n" - "a_sess_vld = \t%d\n" - "a_srp_det = \t%d\n" - "a_vbus_vld = \t%d\n" - "b_bus_resume = \t%d\n" - "b_bus_suspend = \t%d\n" - "b_conn = \t%d\n" - "b_se0_srp = \t%d\n" - "b_sess_end = \t%d\n" - "b_sess_vld = \t%d\n" - "id = \t%d\n" - "a_set_b_hnp_en = \t%d\n" - "b_srp_done = \t%d\n" - "b_hnp_enable = \t%d\n" - "a_wait_vrise_tmout = \t%d\n" - "a_wait_bcon_tmout = \t%d\n" - "a_aidl_bdis_tmout = \t%d\n" - "b_ase0_brst_tmout = \t%d\n" - "a_bus_drop = \t%d\n" - "a_bus_req = \t%d\n" - "a_clr_err = \t%d\n" - "a_suspend_req = \t%d\n" - "b_bus_req = \t%d\n" - "b_bus_suspend_tmout = \t%d\n" - "b_bus_suspend_vld = \t%d\n", - state_string(langwell->otg.state), - langwell->hsm.a_bus_resume, - langwell->hsm.a_bus_suspend, - langwell->hsm.a_conn, - langwell->hsm.a_sess_vld, - langwell->hsm.a_srp_det, - langwell->hsm.a_vbus_vld, - langwell->hsm.b_bus_resume, - langwell->hsm.b_bus_suspend, - langwell->hsm.b_conn, - langwell->hsm.b_se0_srp, - langwell->hsm.b_sess_end, - langwell->hsm.b_sess_vld, - langwell->hsm.id, - langwell->hsm.a_set_b_hnp_en, - langwell->hsm.b_srp_done, - langwell->hsm.b_hnp_enable, - langwell->hsm.a_wait_vrise_tmout, - langwell->hsm.a_wait_bcon_tmout, - langwell->hsm.a_aidl_bdis_tmout, - langwell->hsm.b_ase0_brst_tmout, - langwell->hsm.a_bus_drop, - langwell->hsm.a_bus_req, - langwell->hsm.a_clr_err, - langwell->hsm.a_suspend_req, - langwell->hsm.b_bus_req, - langwell->hsm.b_bus_suspend_tmout, - langwell->hsm.b_bus_suspend_vld - ); - size -= t; - next += t; - - return PAGE_SIZE - size; -} -static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL); - -static ssize_t -get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *langwell; - char *next; - unsigned size; - unsigned t; - - 
langwell = the_transceiver; - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_a_bus_req(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *langwell; - langwell = the_transceiver; - if (!langwell->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '0') { - langwell->hsm.a_bus_req = 0; - otg_dbg("a_bus_req = 0\n"); - } else if (buf[0] == '1') { - /* If a_bus_drop is TRUE, a_bus_req can't be set */ - if (langwell->hsm.a_bus_drop) - return -1; - langwell->hsm.a_bus_req = 1; - otg_dbg("a_bus_req = 1\n"); - } - if (spin_trylock(&langwell->wq_lock)) { - queue_work(langwell->qwork, &langwell->work); - spin_unlock(&langwell->wq_lock); - } - return count; -} -static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req); - -static ssize_t -get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *langwell; - char *next; - unsigned size; - unsigned t; - - langwell = the_transceiver; - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_a_bus_drop(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *langwell; - langwell = the_transceiver; - if (!langwell->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '0') { - langwell->hsm.a_bus_drop = 0; - otg_dbg("a_bus_drop = 0\n"); - } else if (buf[0] == '1') { - langwell->hsm.a_bus_drop = 1; - langwell->hsm.a_bus_req = 0; - otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n"); - } - if (spin_trylock(&langwell->wq_lock)) { - queue_work(langwell->qwork, &langwell->work); - spin_unlock(&langwell->wq_lock); - } - return count; -} -static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO, - get_a_bus_drop, set_a_bus_drop); - -static ssize_t -get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *langwell; - char *next; - unsigned size; - unsigned t; - - langwell = the_transceiver; - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_b_bus_req(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *langwell; - langwell = the_transceiver; - - if (langwell->otg.default_a) - return -1; - - if (count > 2) - return -1; - - if (buf[0] == '0') { - langwell->hsm.b_bus_req = 0; - otg_dbg("b_bus_req = 0\n"); - } else if (buf[0] == '1') { - langwell->hsm.b_bus_req = 1; - otg_dbg("b_bus_req = 1\n"); - } - if (spin_trylock(&langwell->wq_lock)) { - queue_work(langwell->qwork, &langwell->work); - spin_unlock(&langwell->wq_lock); - } - return count; -} -static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req); - -static ssize_t -set_a_clr_err(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *langwell; - langwell = the_transceiver; - - if (!langwell->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '1') { - langwell->hsm.a_clr_err = 1; - otg_dbg("a_clr_err = 1\n"); - } - if (spin_trylock(&langwell->wq_lock)) { - queue_work(langwell->qwork, &langwell->work); - spin_unlock(&langwell->wq_lock); - } 
- return count; -} -static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err); - -static struct attribute *inputs_attrs[] = { - &dev_attr_a_bus_req.attr, - &dev_attr_a_bus_drop.attr, - &dev_attr_b_bus_req.attr, - &dev_attr_a_clr_err.attr, - NULL, -}; - -static struct attribute_group debug_dev_attr_group = { - .name = "inputs", - .attrs = inputs_attrs, -}; - -int langwell_register_host(struct pci_driver *host_driver) -{ - int ret = 0; - - the_transceiver->host_ops = host_driver; - queue_work(the_transceiver->qwork, &the_transceiver->work); - otg_dbg("host controller driver is registered\n"); - - return ret; -} -EXPORT_SYMBOL(langwell_register_host); - -void langwell_unregister_host(struct pci_driver *host_driver) -{ - if (the_transceiver->host_ops) - the_transceiver->host_ops->remove(the_transceiver->pdev); - the_transceiver->host_ops = NULL; - the_transceiver->hsm.a_bus_drop = 1; - queue_work(the_transceiver->qwork, &the_transceiver->work); - otg_dbg("host controller driver is unregistered\n"); -} -EXPORT_SYMBOL(langwell_unregister_host); - -int langwell_register_peripheral(struct pci_driver *client_driver) -{ - int ret = 0; - - if (client_driver) - ret = client_driver->probe(the_transceiver->pdev, - client_driver->id_table); - if (!ret) { - the_transceiver->client_ops = client_driver; - queue_work(the_transceiver->qwork, &the_transceiver->work); - otg_dbg("client controller driver is registered\n"); - } - - return ret; -} -EXPORT_SYMBOL(langwell_register_peripheral); - -void langwell_unregister_peripheral(struct pci_driver *client_driver) -{ - if (the_transceiver->client_ops) - the_transceiver->client_ops->remove(the_transceiver->pdev); - the_transceiver->client_ops = NULL; - the_transceiver->hsm.b_bus_req = 0; - queue_work(the_transceiver->qwork, &the_transceiver->work); - otg_dbg("client controller driver is unregistered\n"); -} -EXPORT_SYMBOL(langwell_unregister_peripheral); - -static int langwell_otg_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - unsigned long resource, len; - void __iomem *base = NULL; - int retval; - u32 val32; - struct langwell_otg *langwell; - char qname[] = "langwell_otg_queue"; - - retval = 0; - otg_dbg("\notg controller is detected.\n"); - if (pci_enable_device(pdev) < 0) { - retval = -ENODEV; - goto done; - } - - langwell = kzalloc(sizeof *langwell, GFP_KERNEL); - if (langwell == NULL) { - retval = -ENOMEM; - goto done; - } - the_transceiver = langwell; - - /* control register: BAR 0 */ - resource = pci_resource_start(pdev, 0); - len = pci_resource_len(pdev, 0); - if (!request_mem_region(resource, len, driver_name)) { - retval = -EBUSY; - goto err; - } - langwell->region = 1; - - base = ioremap_nocache(resource, len); - if (base == NULL) { - retval = -EFAULT; - goto err; - } - langwell->regs = base; - - if (!pdev->irq) { - otg_dbg("No IRQ.\n"); - retval = -ENODEV; - goto err; - } - - langwell->qwork = create_workqueue(qname); - if (!langwell->qwork) { - otg_dbg("cannot create workqueue %s\n", qname); - retval = -ENOMEM; - goto err; - } - INIT_WORK(&langwell->work, langwell_otg_work); - - /* OTG common part */ - langwell->pdev = pdev; - langwell->otg.dev = &pdev->dev; - langwell->otg.label = driver_name; - langwell->otg.set_host = langwell_otg_set_host; - langwell->otg.set_peripheral = langwell_otg_set_peripheral; - langwell->otg.set_power = langwell_otg_set_power; - langwell->otg.start_srp = langwell_otg_start_srp; - langwell->otg.state = OTG_STATE_UNDEFINED; - if (otg_set_transceiver(&langwell->otg)) { - otg_dbg("can't set 
transceiver\n"); - retval = -EBUSY; - goto err; - } - - reset_otg(); - init_hsm(); - - spin_lock_init(&langwell->lock); - spin_lock_init(&langwell->wq_lock); - INIT_LIST_HEAD(&active_timers); - langwell_otg_init_timers(&langwell->hsm); - - if (request_irq(pdev->irq, otg_irq, IRQF_SHARED, - driver_name, langwell) != 0) { - otg_dbg("request interrupt %d failed\n", pdev->irq); - retval = -EBUSY; - goto err; - } - - /* enable OTGSC int */ - val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE | - OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU; - writel(val32, langwell->regs + CI_OTGSC); - - retval = device_create_file(&pdev->dev, &dev_attr_registers); - if (retval < 0) { - otg_dbg("Can't register sysfs attribute: %d\n", retval); - goto err; - } - - retval = device_create_file(&pdev->dev, &dev_attr_hsm); - if (retval < 0) { - otg_dbg("Can't hsm sysfs attribute: %d\n", retval); - goto err; - } - - retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group); - if (retval < 0) { - otg_dbg("Can't register sysfs attr group: %d\n", retval); - goto err; - } - - if (langwell->otg.state == OTG_STATE_A_IDLE) - queue_work(langwell->qwork, &langwell->work); - - return 0; - -err: - if (the_transceiver) - langwell_otg_remove(pdev); -done: - return retval; -} - -static void langwell_otg_remove(struct pci_dev *pdev) -{ - struct langwell_otg *langwell; - - langwell = the_transceiver; - - if (langwell->qwork) { - flush_workqueue(langwell->qwork); - destroy_workqueue(langwell->qwork); - } - langwell_otg_free_timers(); - - /* disable OTGSC interrupt as OTGSC doesn't change in reset */ - writel(0, langwell->regs + CI_OTGSC); - - if (pdev->irq) - free_irq(pdev->irq, langwell); - if (langwell->regs) - iounmap(langwell->regs); - if (langwell->region) - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - - otg_set_transceiver(NULL); - pci_disable_device(pdev); - sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group); - device_remove_file(&pdev->dev, &dev_attr_hsm); - device_remove_file(&pdev->dev, &dev_attr_registers); - kfree(langwell); - langwell = NULL; -} - -static void transceiver_suspend(struct pci_dev *pdev) -{ - pci_save_state(pdev); - pci_set_power_state(pdev, PCI_D3hot); - langwell_otg_phy_low_power(1); -} - -static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message) -{ - int ret = 0; - struct langwell_otg *langwell; - - langwell = the_transceiver; - - /* Disbale OTG interrupts */ - langwell_otg_intr(0); - - if (pdev->irq) - free_irq(pdev->irq, langwell); - - /* Prevent more otg_work */ - flush_workqueue(langwell->qwork); - spin_lock(&langwell->wq_lock); - - /* start actions */ - switch (langwell->otg.state) { - case OTG_STATE_A_IDLE: - case OTG_STATE_B_IDLE: - case OTG_STATE_A_WAIT_VFALL: - case OTG_STATE_A_VBUS_ERR: - transceiver_suspend(pdev); - break; - case OTG_STATE_A_WAIT_VRISE: - langwell_otg_del_timer(a_wait_vrise_tmr); - langwell->hsm.a_srp_det = 0; - langwell_otg_drv_vbus(0); - langwell->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_A_WAIT_BCON: - langwell_otg_del_timer(a_wait_bcon_tmr); - if (langwell->host_ops) - ret = langwell->host_ops->suspend(pdev, message); - langwell_otg_drv_vbus(0); - break; - case OTG_STATE_A_HOST: - if (langwell->host_ops) - ret = langwell->host_ops->suspend(pdev, message); - langwell_otg_drv_vbus(0); - langwell_otg_phy_low_power(1); - break; - case OTG_STATE_A_SUSPEND: - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - if (langwell->host_ops) - 
langwell->host_ops->remove(pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell_otg_drv_vbus(0); - transceiver_suspend(pdev); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_A_PERIPHERAL: - if (langwell->client_ops) - ret = langwell->client_ops->suspend(pdev, message); - else - otg_dbg("client driver has been removed.\n"); - langwell_otg_drv_vbus(0); - transceiver_suspend(pdev); - langwell->otg.state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_B_HOST: - if (langwell->host_ops) - langwell->host_ops->remove(pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->hsm.b_bus_req = 0; - transceiver_suspend(pdev); - langwell->otg.state = OTG_STATE_B_IDLE; - break; - case OTG_STATE_B_PERIPHERAL: - if (langwell->client_ops) - ret = langwell->client_ops->suspend(pdev, message); - else - otg_dbg("client driver has been removed.\n"); - break; - case OTG_STATE_B_WAIT_ACON: - langwell_otg_del_timer(b_ase0_brst_tmr); - langwell_otg_HAAR(0); - if (langwell->host_ops) - langwell->host_ops->remove(pdev); - else - otg_dbg("host driver has been removed.\n"); - langwell->hsm.b_bus_req = 0; - langwell->otg.state = OTG_STATE_B_IDLE; - transceiver_suspend(pdev); - break; - default: - otg_dbg("error state before suspend\n "); - break; - } - spin_unlock(&langwell->wq_lock); - - return ret; -} - -static void transceiver_resume(struct pci_dev *pdev) -{ - pci_restore_state(pdev); - pci_set_power_state(pdev, PCI_D0); - langwell_otg_phy_low_power(0); -} - -static int langwell_otg_resume(struct pci_dev *pdev) -{ - int ret = 0; - struct langwell_otg *langwell; - - langwell = the_transceiver; - - spin_lock(&langwell->wq_lock); - - switch (langwell->otg.state) { - case OTG_STATE_A_IDLE: - case OTG_STATE_B_IDLE: - case OTG_STATE_A_WAIT_VFALL: - case OTG_STATE_A_VBUS_ERR: - transceiver_resume(pdev); - break; - case OTG_STATE_A_WAIT_BCON: - langwell_otg_add_timer(a_wait_bcon_tmr); - langwell_otg_drv_vbus(1); - if (langwell->host_ops) - ret = langwell->host_ops->resume(pdev); - break; - case OTG_STATE_A_HOST: - langwell_otg_drv_vbus(1); - langwell_otg_phy_low_power(0); - if (langwell->host_ops) - ret = langwell->host_ops->resume(pdev); - break; - case OTG_STATE_B_PERIPHERAL: - if (langwell->client_ops) - ret = langwell->client_ops->resume(pdev); - else - otg_dbg("client driver not loaded.\n"); - break; - default: - otg_dbg("error state before suspend\n "); - break; - } - - if (request_irq(pdev->irq, otg_irq, IRQF_SHARED, - driver_name, the_transceiver) != 0) { - otg_dbg("request interrupt %d failed\n", pdev->irq); - ret = -EBUSY; - } - - /* enable OTG interrupts */ - langwell_otg_intr(1); - - spin_unlock(&langwell->wq_lock); - - queue_work(langwell->qwork, &langwell->work); - - - return ret; -} - -static int __init langwell_otg_init(void) -{ - return pci_register_driver(&otg_pci_driver); -} -module_init(langwell_otg_init); - -static void __exit langwell_otg_cleanup(void) -{ - pci_unregister_driver(&otg_pci_driver); -} -module_exit(langwell_otg_cleanup); diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c index 9ed5ea56867..af456b48985 100644 --- a/drivers/usb/otg/nop-usb-xceiv.c +++ b/drivers/usb/otg/nop-usb-xceiv.c @@ -53,6 +53,7 @@ EXPORT_SYMBOL(usb_nop_xceiv_register); void usb_nop_xceiv_unregister(void) { platform_device_unregister(pd); + pd = NULL; } EXPORT_SYMBOL(usb_nop_xceiv_unregister); diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 247b61bfb7f..0e4f2e41ace 100644 --- 
a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -169,9 +169,11 @@ static int usb_console_setup(struct console *co, char *options) kfree(tty); } } - /* So we know not to kill the hardware on a hangup on this - port. We have also bumped the use count by one so it won't go - idle */ + /* Now that any required fake tty operations are completed restore + * the tty port count */ + --port->port.count; + /* The console is special in terms of closing the device so + * indicate this port is now acting as a system console. */ port->console = 1; retval = 0; @@ -204,7 +206,7 @@ static void usb_console_write(struct console *co, dbg("%s - port %d, %d byte(s)", __func__, port->number, count); - if (!port->port.count) { + if (!port->console) { dbg("%s - port not opened", __func__); return; } @@ -300,8 +302,7 @@ void usb_serial_console_exit(void) { if (usbcons_info.port) { unregister_console(&usbcons); - if (usbcons_info.port->port.count) - usbcons_info.port->port.count--; + usbcons_info.port->console = 0; usbcons_info.port = NULL; } } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2b9eeda62bf..985cbcf48bd 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -67,6 +67,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */ + { USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */ + { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ @@ -78,6 +80,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ + { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ @@ -94,7 +97,9 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 9734085fd2f..59adfe12311 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -1228,8 +1228,8 @@ static void cypress_read_int_callback(struct urb *urb) /* precursor to disconnect so just go away */ return; case -EPIPE: - usb_clear_halt(port->serial->dev, 0x81); - break; + /* Can't call usb_clear_halt while in_interrupt */ + /* FALLS THROUGH */ default: /* something ugly is going on... 
*/ dev_err(&urb->dev->dev, diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 3dc3768ca71..8fec5d4455c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -33,6 +33,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> @@ -107,6 +108,7 @@ struct ftdi_sio_quirk { static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); +static int ftdi_NDI_device_setup(struct usb_serial *serial); static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); @@ -118,6 +120,10 @@ static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { .probe = ftdi_mtxorb_hack_setup, }; +static struct ftdi_sio_quirk ftdi_NDI_device_quirk = { + .probe = ftdi_NDI_device_setup, +}; + static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { .port_probe = ftdi_USB_UIRT_setup, }; @@ -191,6 +197,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, @@ -579,6 +586,9 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) }, { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) }, { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) }, { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) }, @@ -644,6 +654,16 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) }, { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, @@ -660,6 +680,8 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, @@ -667,7 +689,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(ALTI2_VID, 
ALTI2_N3_PID) }, { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) }, { USB_DEVICE(ATMEL_VID, STK541_PID) }, { USB_DEVICE(DE_VID, STB_PID) }, { USB_DEVICE(DE_VID, WHT_PID) }, @@ -677,6 +698,10 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, + { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, + { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, + { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1023,6 +1048,16 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, case FT2232C: /* FT2232C chip */ case FT232RL: if (baud <= 3000000) { + __u16 product_id = le16_to_cpu( + port->serial->dev->descriptor.idProduct); + if (((FTDI_NDI_HUC_PID == product_id) || + (FTDI_NDI_SPECTRA_SCU_PID == product_id) || + (FTDI_NDI_FUTURE_2_PID == product_id) || + (FTDI_NDI_FUTURE_3_PID == product_id) || + (FTDI_NDI_AURORA_SCU_PID == product_id)) && + (baud == 19200)) { + baud = 1200000; + } div_value = ftdi_232bm_baud_to_divisor(baud); } else { dbg("%s - Baud rate too high!", __func__); @@ -1554,6 +1589,39 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv) } /* ftdi_HE_TIRA1_setup */ /* + * Module parameter to control latency timer for NDI FTDI-based USB devices. + * If this value is not set in modprobe.conf.local its value will be set to 1ms. + */ +static int ndi_latency_timer = 1; + +/* Setup for the NDI FTDI-based USB devices, which requires hardwired + * baudrate (19200 gets mapped to 1200000). + * + * Called from usbserial:serial_probe. + */ +static int ftdi_NDI_device_setup(struct usb_serial *serial) +{ + struct usb_device *udev = serial->dev; + int latency = ndi_latency_timer; + int rv = 0; + char buf[1]; + + if (latency == 0) + latency = 1; + if (latency > 99) + latency = 99; + + dbg("%s setting NDI device latency to %d", __func__, latency); + dev_info(&udev->dev, "NDI device with a latency value of %d", latency); + + rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + FTDI_SIO_SET_LATENCY_TIMER_REQUEST, + FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, + latency, 0, buf, 0, WDR_TIMEOUT); + return 0; +} + +/* * First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko * Neo1973 Debug Board is reserved for JTAG interface and can be accessed from * userspace using openocd. @@ -2121,7 +2189,7 @@ static void ftdi_process_read(struct work_struct *work) /* Note that the error flag is duplicated for every character received since we don't know which character it applied to */ - if (!usb_serial_handle_sysrq_char(port, + if (!usb_serial_handle_sysrq_char(tty, port, data[packet_offset + i])) tty_insert_flip_char(tty, data[packet_offset + i], @@ -2622,3 +2690,5 @@ MODULE_PARM_DESC(vendor, "User specified vendor ID (default=" module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified product ID"); +module_param(ndi_latency_timer, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override"); diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index f1d440a728a..8c92b88166a 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -506,6 +506,7 @@ * * Armin Laeuger originally sent the PID for the UM 100 module. 
*/ +#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG */ #define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */ #define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */ #define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */ @@ -614,6 +615,9 @@ #define FTDI_CCSICDU20_0_PID 0xF9D0 #define FTDI_CCSICDU40_1_PID 0xF9D1 #define FTDI_CCSMACHX_2_PID 0xF9D2 +#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3 +#define FTDI_CCSICDU64_4_PID 0xF9D4 +#define FTDI_CCSPRIME8_5_PID 0xF9D5 /* Inside Accesso contactless reader (http://www.insidefr.com) */ #define INSIDE_ACCESSO 0xFAD0 @@ -736,6 +740,15 @@ #define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */ /* + * NDI (www.ndigital.com) product ids + */ +#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */ +#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */ +#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */ +#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */ +#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ + +/* * Posiflex inc retail equipment (http://www.posiflex.com.tw) */ #define POSIFLEX_VID 0x0d3a /* Vendor ID */ @@ -848,9 +861,6 @@ #define TML_VID 0x1B91 /* Vendor ID */ #define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ -/* NDI Polaris System */ -#define FTDI_NDI_HUC_PID 0xDA70 - /* Propox devices */ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 @@ -934,6 +944,29 @@ #define MARVELL_VID 0x9e88 #define MARVELL_SHEEVAPLUG_PID 0x9e8f +#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ + +/* + * GN Otometrics (http://www.otometrics.com) + * Submitted by Ville Sundberg. + */ +#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ +#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ + +/* + * Bayer Ascensia Contour blood glucose meter USB-converter cable. 
+ * http://winglucofacts.com/cables/ + */ +#define BAYER_VID 0x1A79 +#define BAYER_CONTOUR_CABLE_PID 0x6001 + +/* + * Marvell OpenRD Base, Client + * http://www.open-rd.org + * OpenRD Base, Client use VID 0x0403 + */ +#define MARVELL_OPENRD_PID 0x9e90 + /* * BmRequestType: 1100 0000b * bRequest: FTDI_E2_READ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 932d6241b78..ce57f6a32bd 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -424,10 +424,17 @@ static void flush_and_resubmit_read_urb(struct usb_serial_port *port) if (!tty) goto done; - /* Push data to tty */ - for (i = 0; i < urb->actual_length; i++, ch++) { - if (!usb_serial_handle_sysrq_char(port, *ch)) - tty_insert_flip_char(tty, *ch, TTY_NORMAL); + /* The per character mucking around with sysrq path it too slow for + stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases + where the USB serial is not a console anyway */ + if (!port->console || !port->sysrq) + tty_insert_flip_string(tty, ch, urb->actual_length); + else { + /* Push data to tty */ + for (i = 0; i < urb->actual_length; i++, ch++) { + if (!usb_serial_handle_sysrq_char(tty, port, *ch)) + tty_insert_flip_char(tty, *ch, TTY_NORMAL); + } } tty_flip_buffer_push(tty); tty_kref_put(tty); @@ -527,11 +534,12 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty) } } -int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) +int usb_serial_handle_sysrq_char(struct tty_struct *tty, + struct usb_serial_port *port, unsigned int ch) { if (port->sysrq && port->console) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, tty_port_tty_get(&port->port)); + handle_sysrq(ch, tty); port->sysrq = 0; return 1; } diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index bfc5ce000ef..ccd4dd340d2 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -521,7 +521,7 @@ static int mos7720_chars_in_buffer(struct tty_struct *tty) mos7720_port = usb_get_serial_port_data(port); if (mos7720_port == NULL) { dbg("%s:leaving ...........", __func__); - return -ENODEV; + return 0; } for (i = 0; i < NUM_URBS; ++i) { diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index c40f95c1951..270009afdf7 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -26,6 +26,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> @@ -123,10 +124,13 @@ #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 -/* This driver also supports the ATEN UC2324 device since it is mos7840 based - * - if I knew the device id it would also support the ATEN UC2322 */ +/* This driver also supports + * ATEN UC2324 device using Moschip MCS7840 + * ATEN UC2322 device using Moschip MCS7820 + */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 +#define ATENINTL_DEVICE_ID_UC2322 0x7820 /* Interrupt Routine Defines */ @@ -176,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {} /* terminating entry */ }; @@ -185,6 +190,7 @@ static __devinitdata struct usb_device_id 
moschip_id_table_combined[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {} /* terminating entry */ }; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 575816e6ba3..c784ddbe7b6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file); static int option_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); static int option_send_setup(struct usb_serial_port *port); +#ifdef CONFIG_PM static int option_suspend(struct usb_serial *serial, pm_message_t message); static int option_resume(struct usb_serial *serial); +#endif /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 @@ -205,7 +207,9 @@ static int option_resume(struct usb_serial *serial); #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 #define NOVATELWIRELESS_PRODUCT_U727 0x5010 +#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 +#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 /* FUTURE NOVATEL PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 @@ -258,11 +262,6 @@ static int option_resume(struct usb_serial *serial); #define AXESSTEL_VENDOR_ID 0x1726 #define AXESSTEL_PRODUCT_MV110H 0x1000 -#define ONDA_VENDOR_ID 0x19d2 -#define ONDA_PRODUCT_MSA501HS 0x0001 -#define ONDA_PRODUCT_ET502HS 0x0002 -#define ONDA_PRODUCT_MT503HS 0x2000 - #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 @@ -300,6 +299,7 @@ static int option_resume(struct usb_serial *serial); #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_CDMA_TECH 0xfffe +#define ZTE_PRODUCT_AC8710 0xfff1 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -307,11 +307,25 @@ static int option_resume(struct usb_serial *serial); #define DLINK_VENDOR_ID 0x1186 #define DLINK_PRODUCT_DWM_652 0x3e04 +#define QISDA_VENDOR_ID 0x1da5 +#define QISDA_PRODUCT_H21_4512 0x4512 +#define QISDA_PRODUCT_H21_4523 0x4523 +#define QISDA_PRODUCT_H20_4515 0x4515 +#define QISDA_PRODUCT_H20_4519 0x4519 + /* TOSHIBA PRODUCTS */ #define TOSHIBA_VENDOR_ID 0x0930 #define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302 +#define ALINK_VENDOR_ID 0x1e0e +#define ALINK_PRODUCT_3GU 0x9200 + +/* ALCATEL PRODUCTS */ +#define ALCATEL_VENDOR_ID 0x1bbb +#define ALCATEL_PRODUCT_X060S 0x0000 + + static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -428,8 +442,10 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 
*/ + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ @@ -463,42 +479,6 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, - { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, - { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) }, - { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) }, - { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) }, { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, @@ -523,14 +503,85 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, - { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, - { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, - { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, - { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, - { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ + { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, + { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, + { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, + { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -539,8 +590,10 @@ static struct usb_driver option_driver = { .name = "option", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, +#ifdef CONFIG_PM .suspend = usb_serial_suspend, .resume = usb_serial_resume, +#endif .id_table = option_ids, .no_dynamic_id = 1, }; @@ -572,8 +625,10 @@ static struct usb_serial_driver option_1port_device = { .disconnect = option_disconnect, .release = option_release, .read_int_callback = option_instat_callback, +#ifdef CONFIG_PM .suspend = option_suspend, .resume = option_resume, +#endif }; static int debug; @@ -732,7 +787,6 @@ static int option_write(struct tty_struct *tty, struct usb_serial_port *port, memcpy(this_urb->transfer_buffer, buf, todo); this_urb->transfer_buffer_length = todo; - this_urb->dev = port->serial->dev; err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err) { dbg("usb_submit_urb %p (write bulk) failed " @@ -816,7 +870,6 @@ static void option_instat_callback(struct urb *urb) int status = urb->status; struct usb_serial_port *port = urb->context; struct option_port_private *portdata = usb_get_serial_port_data(port); - struct usb_serial *serial = port->serial; dbg("%s", __func__); dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); @@ -860,7 +913,6 @@ static void option_instat_callback(struct urb *urb) /* Resubmit urb so we continue receiving IRQ data */ if (status != -ESHUTDOWN && status != 
-ENOENT) { - urb->dev = serial->dev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dbg("%s: resubmit intr urb failed. (%d)", @@ -913,7 +965,6 @@ static int option_open(struct tty_struct *tty, struct usb_serial_port *port, struct file *filp) { struct option_port_private *portdata; - struct usb_serial *serial = port->serial; int i, err; struct urb *urb; @@ -921,23 +972,11 @@ static int option_open(struct tty_struct *tty, dbg("%s", __func__); - /* Reset low level data toggle and start reading from endpoints */ + /* Start reading from the IN endpoint */ for (i = 0; i < N_IN_URB; i++) { urb = portdata->in_urbs[i]; if (!urb) continue; - if (urb->dev != serial->dev) { - dbg("%s: dev %p != %p", __func__, - urb->dev, serial->dev); - continue; - } - - /* - * make sure endpoint data toggle is synchronized with the - * device - */ - usb_clear_halt(urb->dev, urb->pipe); - err = usb_submit_urb(urb, GFP_KERNEL); if (err) { dbg("%s: submit urb %d failed (%d) %d", @@ -946,16 +985,6 @@ static int option_open(struct tty_struct *tty, } } - /* Reset low level data toggle on out endpoints */ - for (i = 0; i < N_OUT_URB; i++) { - urb = portdata->out_urbs[i]; - if (!urb) - continue; - urb->dev = serial->dev; - /* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe), 0); */ - } - option_send_setup(port); return 0; @@ -1195,6 +1224,7 @@ static void option_release(struct usb_serial *serial) } } +#ifdef CONFIG_PM static int option_suspend(struct usb_serial *serial, pm_message_t message) { dbg("%s entered", __func__); @@ -1218,7 +1248,6 @@ static int option_resume(struct usb_serial *serial) dbg("%s: No interrupt URB for port %d\n", __func__, i); continue; } - port->interrupt_in_urb->dev = serial->dev; err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); dbg("Submitted interrupt URB for port %d (result %d)", i, err); if (err < 0) { @@ -1254,6 +1283,7 @@ static int option_resume(struct usb_serial *serial) } return 0; } +#endif MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index ec6c132a25b..3e86815b270 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -94,6 +94,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, + { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, + { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -971,18 +973,46 @@ exit: __func__, retval); } +static void pl2303_push_data(struct tty_struct *tty, + struct usb_serial_port *port, struct urb *urb, + u8 line_status) +{ + unsigned char *data = urb->transfer_buffer; + /* get tty_flag from status */ + char tty_flag = TTY_NORMAL; + /* break takes precedence over parity, */ + /* which takes precedence over framing errors */ + if (line_status & UART_BREAK_ERROR) + tty_flag = TTY_BREAK; + else if (line_status & UART_PARITY_ERROR) + tty_flag = TTY_PARITY; + else if (line_status & UART_FRAME_ERROR) + tty_flag = TTY_FRAME; + dbg("%s - tty_flag = %d", __func__, tty_flag); + + tty_buffer_request_room(tty, urb->actual_length + 1); + /* overrun is special, not associated with a char */ + if (line_status & UART_OVERRUN_ERROR) + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + if (port->console && port->sysrq) { + int i; + for (i = 0; i < urb->actual_length; ++i) + if (!usb_serial_handle_sysrq_char(tty, port, data[i])) + 
tty_insert_flip_char(tty, data[i], tty_flag); + } else + tty_insert_flip_string(tty, data, urb->actual_length); + tty_flip_buffer_push(tty); +} + static void pl2303_read_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct pl2303_private *priv = usb_get_serial_port_data(port); struct tty_struct *tty; - unsigned char *data = urb->transfer_buffer; unsigned long flags; - int i; int result; int status = urb->status; u8 line_status; - char tty_flag; dbg("%s - port %d", __func__, port->number); @@ -1010,10 +1040,7 @@ static void pl2303_read_bulk_callback(struct urb *urb) } usb_serial_debug_data(debug, &port->dev, __func__, - urb->actual_length, data); - - /* get tty_flag from status */ - tty_flag = TTY_NORMAL; + urb->actual_length, urb->transfer_buffer); spin_lock_irqsave(&priv->lock, flags); line_status = priv->line_status; @@ -1021,26 +1048,9 @@ static void pl2303_read_bulk_callback(struct urb *urb) spin_unlock_irqrestore(&priv->lock, flags); wake_up_interruptible(&priv->delta_msr_wait); - /* break takes precedence over parity, */ - /* which takes precedence over framing errors */ - if (line_status & UART_BREAK_ERROR) - tty_flag = TTY_BREAK; - else if (line_status & UART_PARITY_ERROR) - tty_flag = TTY_PARITY; - else if (line_status & UART_FRAME_ERROR) - tty_flag = TTY_FRAME; - dbg("%s - tty_flag = %d", __func__, tty_flag); - tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length + 1); - /* overrun is special, not associated with a char */ - if (line_status & UART_OVERRUN_ERROR) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); - for (i = 0; i < urb->actual_length; ++i) - if (!usb_serial_handle_sysrq_char(port, data[i])) - tty_insert_flip_char(tty, data[i], tty_flag); - tty_flip_buffer_push(tty); + pl2303_push_data(tty, port, urb, line_status); } tty_kref_put(tty); /* Schedule the next read _if_ we are still open */ diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 1d7a22e3a9f..ee9505e1dd9 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -122,3 +122,11 @@ /* Hewlett-Packard LD220-HP POS Pole Display */ #define HP_VENDOR_ID 0x03f0 #define HP_LD220_PRODUCT_ID 0x3524 + +/* Cressi Edy (diving computer) PC interface */ +#define CRESSI_VENDOR_ID 0x04b8 +#define CRESSI_EDY_PRODUCT_ID 0x0521 + +/* Sony, USB data cable for CMD-Jxx mobile phones */ +#define SONY_VENDOR_ID 0x054c +#define SONY_QN3USB_PRODUCT_ID 0x0437 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 032f7aeb40a..f48d05e0acc 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -181,35 +181,50 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { }; static struct usb_device_id id_table [] = { + { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ + { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ + { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ + { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ - { USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */ { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ - { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ + { USB_DEVICE(0x1199, 0x0022) }, /* Sierra Wireless EM5725 */ + { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless 
MC5727 */ + { USB_DEVICE(0x1199, 0x0224) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ + { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ - /* Sierra Wireless C597 */ + /* Sierra Wireless C597 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, - /* Sierra Wireless Device */ + /* Sierra Wireless T598 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, - { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ - { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */ - { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */ + { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless T11 */ + { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless AC402 */ + { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless MC5728 */ + { USB_DEVICE(0x1199, 0x0029) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ - { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ + { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ + { USB_DEVICE(0x1199, 0x6805) }, /* Sierra Wireless MC8765 */ + { USB_DEVICE(0x1199, 0x6808) }, /* Sierra Wireless MC8755 */ + { USB_DEVICE(0x1199, 0x6809) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ - { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */ + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ - { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ + { USB_DEVICE(0x1199, 0x6816) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ + { USB_DEVICE(0x1199, 0x6822) }, /* Sierra Wireless AirCard 875E */ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ + { USB_DEVICE(0x1199, 0x6834) }, /* Sierra Wireless MC8780 */ + { USB_DEVICE(0x1199, 0x6835) }, /* Sierra Wireless MC8781 */ + { USB_DEVICE(0x1199, 0x6838) }, /* Sierra Wireless MC8780 */ + { USB_DEVICE(0x1199, 0x6839) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ /* Sierra Wireless MC8790, MC8791, MC8792 Composite */ @@ -227,16 +242,13 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ /* Sierra Wireless C885 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, - /* Sierra Wireless Device */ + /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, - /* Sierra Wireless Device */ + /* Sierra Wireless C22/C33 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)}, - /* Sierra Wireless Device */ + /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, - - { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ - { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ - + { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct 
IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, @@ -814,7 +826,7 @@ static int sierra_startup(struct usb_serial *serial) return 0; } -static void sierra_disconnect(struct usb_serial *serial) +static void sierra_release(struct usb_serial *serial) { int i; struct usb_serial_port *port; @@ -830,7 +842,6 @@ static void sierra_disconnect(struct usb_serial *serial) if (!portdata) continue; kfree(portdata); - usb_set_serial_port_data(port, NULL); } } @@ -853,7 +864,7 @@ static struct usb_serial_driver sierra_device = { .tiocmget = sierra_tiocmget, .tiocmset = sierra_tiocmset, .attach = sierra_startup, - .disconnect = sierra_disconnect, + .release = sierra_release, .read_int_callback = sierra_instat_callback, }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 991d8232e37..3bc609fe224 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -191,7 +191,6 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, - { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, }; static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { @@ -728,7 +727,7 @@ static int ti_write_room(struct tty_struct *tty) dbg("%s - port %d", __func__, port->number); if (tport == NULL) - return -ENODEV; + return 0; spin_lock_irqsave(&tport->tp_lock, flags); room = ti_buf_space_avail(tport->tp_write_buf); @@ -749,7 +748,7 @@ static int ti_chars_in_buffer(struct tty_struct *tty) dbg("%s - port %d", __func__, port->number); if (tport == NULL) - return -ENODEV; + return 0; spin_lock_irqsave(&tport->tp_lock, flags); chars = ti_buf_data_avail(tport->tp_write_buf); @@ -1658,7 +1657,7 @@ static int ti_do_download(struct usb_device *dev, int pipe, u8 cs = 0; int done; struct ti_firmware_header *header; - int status; + int status = 0; int len; for (pos = sizeof(struct ti_firmware_header); pos < size; pos++) diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index d595aa5586a..99188c92068 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -21,6 +21,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> @@ -31,6 +32,7 @@ #include <linux/mutex.h> #include <linux/list.h> #include <linux/uaccess.h> +#include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "pl2303.h" @@ -183,6 +185,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) struct usb_serial_port *port; unsigned int portNumber; int retval = 0; + int first = 0; dbg("%s", __func__); @@ -220,8 +223,9 @@ static int serial_open (struct tty_struct *tty, struct file *filp) tty->driver_data = port; tty_port_tty_set(&port->port, tty); - if (port->port.count == 1) { - + /* If the console is attached, the device is already open */ + if (port->port.count == 1 && !port->console) { + first = 1; /* lock this module before we call it * this may fail, which means we must bail out, * safe because we are called with BKL held */ @@ -244,13 +248,21 @@ static int serial_open (struct tty_struct *tty, struct file *filp) if (retval) goto bailout_interface_put; mutex_unlock(&serial->disc_mutex); + set_bit(ASYNCB_INITIALIZED, 
&port->port.flags); } mutex_unlock(&port->mutex); /* Now do the correct tty layer semantics */ retval = tty_port_block_til_ready(&port->port, tty, filp); - if (retval == 0) + if (retval == 0) { + if (!first) + usb_serial_put(serial); return 0; - + } + mutex_lock(&port->mutex); + if (first == 0) + goto bailout_mutex_unlock; + /* Undo the initial port actions */ + mutex_lock(&serial->disc_mutex); bailout_interface_put: usb_autopm_put_interface(serial->interface); bailout_module_put: @@ -333,8 +345,27 @@ static void serial_close(struct tty_struct *tty, struct file *filp) { struct usb_serial_port *port = tty->driver_data; + if (!port) + return; + dbg("%s - port %d", __func__, port->number); + /* FIXME: + This leaves a very narrow race. Really we should do the + serial_do_free() on tty->shutdown(), but tty->shutdown can + be called from IRQ context and serial_do_free can sleep. + + The right fix is probably to make the tty free (which is rare) + and thus tty->shutdown() occur via a work queue and simplify all + the drivers that use it. + */ + if (tty_hung_up_p(filp)) { + /* serial_hangup already called serial_down at this point. + Another user may have already reopened the port but + serial_do_free is refcounted */ + serial_do_free(port); + return; + } if (tty_port_close_start(&port->port, tty, filp) == 0) return; @@ -350,7 +381,8 @@ static void serial_hangup(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; serial_do_down(port); tty_port_hangup(&port->port); - serial_do_free(port); + /* We must not free port yet - the USB serial layer depends on it's + continued existence */ } static int serial_write(struct tty_struct *tty, const unsigned char *buf, @@ -389,7 +421,6 @@ static int serial_chars_in_buffer(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; dbg("%s = port %d", __func__, port->number); - WARN_ON(!port->port.count); /* if the device was unplugged then any remaining characters fell out of the connector ;) */ if (port->serial->disconnected) diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c index d41cc0a970f..773a5cd38c5 100644 --- a/drivers/usb/storage/option_ms.c +++ b/drivers/usb/storage/option_ms.c @@ -118,6 +118,9 @@ static int option_inquiry(struct us_data *us) result = memcmp(buffer+8, "Option", 6); + if (result != 0) + result = memcmp(buffer+8, "ZCOPTION", 8); + /* Read the CSW */ usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index fcb32021721..e20dc525d17 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us) US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, us->ifnum, us->iobuf, 1, HZ); + 0, us->ifnum, us->iobuf, 1, 10*HZ); US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1b9c5dd0fb2..7477d411959 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -838,6 +838,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Rogerio Brito <rbrito@ime.usp.br> */ +UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, + "Prolific Technology, Inc.", + "Mass Storage Device", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_NOT_LOCKABLE ), + /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ /* Change 
to bcdDeviceMin (0x0100 to 0x0001) reported by * Thomas Bartosik <tbartdev@gmx-topmail.de> */ diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d6d65ef85f5..3b54b394017 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -616,6 +616,8 @@ config FB_STI select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT + select STI_CONSOLE + select VT default y ---help--- STI refers to the HP "Standard Text Interface" which is a set of @@ -1117,12 +1119,13 @@ config FB_CARILLO_RANCH config FB_INTEL tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)" - depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL + depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED select FB_MODE_HELPERS select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB_BOOT_VESA_SUPPORT if FB_INTEL = y + depends on !DRM_I915 help This driver supports the on-board graphics built in to the Intel 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets. diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index fb8163d181a..a21efcd10b7 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -226,9 +226,10 @@ static int clcdfb_set_par(struct fb_info *info) clcdfb_enable(fb, regs.cntl); #ifdef DEBUG - printk(KERN_INFO "CLCD: Registers set to\n" - KERN_INFO " %08x %08x %08x %08x\n" - KERN_INFO " %08x %08x %08x %08x\n", + printk(KERN_INFO + "CLCD: Registers set to\n" + " %08x %08x %08x %08x\n" + " %08x %08x %08x %08x\n", readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1), readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3), readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS), diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 018850c116c..8cd279be74e 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c @@ -2405,6 +2405,9 @@ static int do_fb_set_var(struct fb_var_screeninfo *var, int isactive) return 0; } +/* fbhw->encode_fix() must be called with fb_info->mm_lock held + * if it is called after the register_framebuffer() - not a case here + */ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info) { struct atafb_par par; @@ -2414,7 +2417,8 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info) if (err) return err; memset(fix, 0, sizeof(struct fb_fix_screeninfo)); - return fbhw->encode_fix(fix, &par); + err = fbhw->encode_fix(fix, &par); + return err; } static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info) @@ -2743,7 +2747,9 @@ static int atafb_set_par(struct fb_info *info) /* Decode wanted screen parameters */ fbhw->decode_var(&info->var, par); + mutex_lock(&info->mm_lock); fbhw->encode_fix(&info->fix, par); + mutex_unlock(&info->mm_lock); /* Set new videomode */ ata_set_par(par); diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 5afd64482f5..da05f0801bb 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -261,6 +261,9 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo) /** * atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory * @sinfo: the frame buffer to allocate memory for + * + * This function is called only from the atmel_lcdfb_probe() + * so no locking by fb_info->mm_lock around smem_len setting is needed. 
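 *
 * (Illustrative aside: drivers that do change smem_start/smem_len after
 * register_framebuffer() are converted elsewhere in this series to wrap
 * the update in the new fb_info->mm_lock mutex, which fb_mmap() now
 * holds while it samples those fields.  A minimal sketch of that
 * pattern, using a made-up helper name:
 *
 *	static void example_set_mem(struct fb_info *info,
 *				    unsigned long phys, u32 len)
 *	{
 *		mutex_lock(&info->mm_lock);
 *		info->fix.smem_start = phys;
 *		info->fix.smem_len = len;
 *		mutex_unlock(&info->mm_lock);
 *	}
 *
 * The probe-only path here may skip the lock because the framebuffer is
 * not registered yet, so no mmap can race with it.)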
*/ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo) { diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h index 7691e73823d..1f39a62f899 100644 --- a/drivers/video/aty/atyfb.h +++ b/drivers/video/aty/atyfb.h @@ -187,6 +187,8 @@ struct atyfb_par { int mtrr_reg; #endif u32 mem_cntl; + struct crtc saved_crtc; + union aty_pll saved_pll; }; /* @@ -217,6 +219,7 @@ struct atyfb_par { #define M64F_XL_DLL 0x00080000 #define M64F_MFB_FORCE_4 0x00100000 #define M64F_HW_TRIPLE 0x00200000 +#define M64F_XL_MEM 0x00400000 /* * Register access */ diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 1207c208a30..63d3739d43a 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -66,6 +66,8 @@ #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/backlight.h> +#include <linux/reboot.h> +#include <linux/dmi.h> #include <asm/io.h> #include <linux/uaccess.h> @@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info); static int store_video_par(char *videopar, unsigned char m64_num); #endif -static struct crtc saved_crtc; -static union aty_pll saved_pll; static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); @@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info); static int read_aty_sense(const struct atyfb_par *par); #endif +static DEFINE_MUTEX(reboot_lock); +static struct fb_info *reboot_info; /* * Interface used by the world @@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, }; #define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) #define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) -#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4) -#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS) +#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM) +#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS) static struct { u16 pci_id; @@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO"; static char ram_sdram[] __devinitdata = "SDRAM (1:1)"; static char ram_sgram[] __devinitdata = "SGRAM (1:1)"; static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)"; +static char ram_wram[] __devinitdata = "WRAM"; static char ram_off[] __devinitdata = "OFF"; #endif /* CONFIG_FB_ATY_CT */ @@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = { #ifdef CONFIG_FB_ATY_CT static char *aty_ct_ram[8] __devinitdata = { ram_off, ram_dram, ram_edo, ram_edo, + ram_sdram, ram_sgram, ram_wram, ram_resv +}; +static char *aty_xl_ram[8] __devinitdata = { + ram_off, ram_dram, ram_edo, ram_edo, ram_sdram, ram_sgram, ram_sdram32, ram_resv }; #endif /* CONFIG_FB_ATY_CT */ @@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc) #endif /* CONFIG_FB_ATY_GENERIC_LCD */ } +static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp) +{ + u32 line_length = vxres * bpp / 8; + + if (par->ram_type == SGRAM || + (!M64_HAS(XL_MEM) && par->ram_type == WRAM)) + 
line_length = (line_length + 63) & ~63; + + return line_length; +} + static int aty_var_to_crtc(const struct fb_info *info, const struct fb_var_screeninfo *var, struct crtc *crtc) { @@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info, u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; u32 pix_width, dp_pix_width, dp_chain_mask; + u32 line_length; /* input */ - xres = var->xres; + xres = (var->xres + 7) & ~7; yres = var->yres; - vxres = var->xres_virtual; + vxres = (var->xres_virtual + 7) & ~7; vyres = var->yres_virtual; - xoffset = var->xoffset; + xoffset = (var->xoffset + 7) & ~7; yoffset = var->yoffset; bpp = var->bits_per_pixel; if (bpp == 16) @@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info, } else FAIL("invalid bpp"); - if (vxres * vyres * bpp / 8 > info->fix.smem_len) + line_length = calc_line_length(par, vxres, bpp); + + if (vyres * line_length > info->fix.smem_len) FAIL("not enough video RAM"); h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; @@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info, crtc->xoffset = xoffset; crtc->yoffset = yoffset; crtc->bpp = bpp; - crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19); + crtc->off_pitch = + ((yoffset * line_length + xoffset * bpp / 8) / 8) | + ((line_length / bpp) << 22); crtc->vline_crnt_vline = 0; crtc->h_tot_disp = h_total | (h_disp<<16); @@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info) } aty_st_8(DAC_MASK, 0xff, par); - info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8; + info->fix.line_length = calc_line_length(par, var->xres_virtual, + var->bits_per_pixel); + info->fix.visual = var->bits_per_pixel <= 8 ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; @@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info) { u32 xoffset = info->var.xoffset; u32 yoffset = info->var.yoffset; - u32 vxres = par->crtc.vxres; + u32 line_length = info->fix.line_length; u32 bpp = info->var.bits_per_pixel; - par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19); + par->crtc.off_pitch = + ((yoffset * line_length + xoffset * bpp / 8) / 8) | + ((line_length / bpp) << 22); } @@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk) const int *refresh_tbl; int i, size; - if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { + if (M64_HAS(XL_MEM)) { refresh_tbl = ragexl_tbl; size = ARRAY_SIZE(ragexl_tbl); } else { @@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info) par->pll_ops = &aty_pll_ct; par->bus_type = PCI; par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07); - ramname = aty_ct_ram[par->ram_type]; + if (M64_HAS(XL_MEM)) + ramname = aty_xl_ram[par->ram_type]; + else + ramname = aty_ct_ram[par->ram_type]; /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) par->pll_limits.mclk = 63; @@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info) #endif /* CONFIG_FB_ATY_CT */ /* save previous video mode */ - aty_get_crtc(par, &saved_crtc); + aty_get_crtc(par, &par->saved_crtc); if(par->pll_ops->get_pll) - par->pll_ops->get_pll(info, &saved_pll); + par->pll_ops->get_pll(info, &par->saved_pll); par->mem_cntl = aty_ld_le32(MEM_CNTL, par); gtb_memsize = M64_HAS(GTB_DSP); @@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info) aty_init_exit: /* restore video mode */ - aty_set_crtc(par, &saved_crtc); - par->pll_ops->set_pll(info, &saved_pll); + aty_set_crtc(par, &par->saved_crtc); + par->pll_ops->set_pll(info, &par->saved_pll); #ifdef CONFIG_MTRR if (par->mtrr_reg >= 0) { @@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi par->mmap_map[1].prot_flag = _PAGE_E; #endif /* __sparc__ */ + mutex_lock(&reboot_lock); + if (!reboot_info) + reboot_info = info; + mutex_unlock(&reboot_lock); + return 0; err_release_io: @@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info) struct atyfb_par *par = (struct atyfb_par *) info->par; /* restore video mode */ - aty_set_crtc(par, &saved_crtc); - par->pll_ops->set_pll(info, &saved_pll); + aty_set_crtc(par, &par->saved_crtc); + par->pll_ops->set_pll(info, &par->saved_pll); unregister_framebuffer(info); @@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); + mutex_lock(&reboot_lock); + if (reboot_info == info) + reboot_info = NULL; + mutex_unlock(&reboot_lock); + atyfb_remove(info); } @@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options) } #endif /* MODULE */ +static int atyfb_reboot_notify(struct notifier_block *nb, + unsigned long code, void *unused) +{ + struct atyfb_par *par; + + if (code != SYS_RESTART) + return NOTIFY_DONE; + + mutex_lock(&reboot_lock); + + if (!reboot_info) + goto out; + + if (!lock_fb_info(reboot_info)) + goto out; + + par = reboot_info->par; + + /* + * HP OmniBook 500's BIOS doesn't like the state of the + * hardware after atyfb has been used. Restore the hardware + * to the original state to allow successful reboots. 
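	 *
	 * (For context: the state restored below is the one captured once
	 * at probe time in aty_init(), now stored per device in struct
	 * atyfb_par rather than in file-scope globals, roughly:
	 *
	 *	aty_get_crtc(par, &par->saved_crtc);
	 *	if (par->pll_ops->get_pll)
	 *		par->pll_ops->get_pll(info, &par->saved_pll);
	 *
	 * so the notifier can put the chip back into its probe-time state.)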
+ */ + aty_set_crtc(par, &par->saved_crtc); + par->pll_ops->set_pll(reboot_info, &par->saved_pll); + + unlock_fb_info(reboot_info); + out: + mutex_unlock(&reboot_lock); + + return NOTIFY_DONE; +} + +static struct notifier_block atyfb_reboot_notifier = { + .notifier_call = atyfb_reboot_notify, +}; + +static const struct dmi_system_id atyfb_reboot_ids[] = { + { + .ident = "HP OmniBook 500", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"), + }, + }, + + { } +}; + static int __init atyfb_init(void) { int err1 = 1, err2 = 1; @@ -3826,11 +3916,20 @@ static int __init atyfb_init(void) err2 = atyfb_atari_probe(); #endif - return (err1 && err2) ? -ENODEV : 0; + if (err1 && err2) + return -ENODEV; + + if (dmi_check_system(atyfb_reboot_ids)) + register_reboot_notifier(&atyfb_reboot_notifier); + + return 0; } static void __exit atyfb_exit(void) { + if (dmi_check_system(atyfb_reboot_ids)) + unregister_reboot_notifier(&atyfb_reboot_notifier); + #ifdef CONFIG_PCI pci_unregister_driver(&atyfb_driver); #endif diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c index 0cc9724e61a..51fcc0a2c94 100644 --- a/drivers/video/aty/mach64_accel.c +++ b/drivers/video/aty/mach64_accel.c @@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par) void aty_init_engine(struct atyfb_par *par, struct fb_info *info) { u32 pitch_value; + u32 vxres; /* determine modal information from global mode structure */ - pitch_value = info->var.xres_virtual; + pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8); + vxres = info->var.xres_virtual; if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ pitch_value *= 3; + vxres *= 3; } /* On GTC (RagePro), we need to reset the 3D engine before */ @@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info) aty_st_le32(SC_LEFT, 0, par); aty_st_le32(SC_TOP, 0, par); aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); - aty_st_le32(SC_RIGHT, pitch_value - 1, par); + aty_st_le32(SC_RIGHT, vxres - 1, par); /* set background color to minimum value (usually BLACK) */ aty_st_le32(DP_BKGD_CLR, 0, par); diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c index c3ebb6b41ce..7aed2565c1b 100644 --- a/drivers/video/backlight/jornada720_bl.c +++ b/drivers/video/backlight/jornada720_bl.c @@ -72,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd) if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { printk(KERN_INFO "bl : failed to set brightness\n"); ret = -ETIMEDOUT; - goto out + goto out; } /* at this point we expect that the mcu has accepted diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index e641584e212..88716626744 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -145,6 +145,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev, struct backlight_device *bl = platform_get_drvdata(pdev); struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev); + if (pb->notify) + pb->notify(0); pwm_config(pb->pwm, 0, pb->period); pwm_disable(pb->pwm); return 0; diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 1dae7f8f3c6..51422fc4f60 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -356,7 
+356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi) lcd->power = FB_BLANK_POWERDOWN; lcd->mode = MODE_VGA; /* default to VGA */ - lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL)); + lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); if (lcd->buf == NULL) { kfree(lcd); return -ENOMEM; diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c index 7bad24ed04e..108b89e09a8 100644 --- a/drivers/video/cobalt_lcdfb.c +++ b/drivers/video/cobalt_lcdfb.c @@ -1,7 +1,7 @@ /* * Cobalt server LCD frame buffer driver. * - * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 471a9a60376..3a44695b9c0 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1082,7 +1082,6 @@ static void fbcon_init(struct vc_data *vc, int init) new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); new_cols /= vc->vc_font.width; new_rows /= vc->vc_font.height; - vc_resize(vc, new_cols, new_rows); /* * We must always set the mode. The mode of the previous console @@ -1111,10 +1110,11 @@ static void fbcon_init(struct vc_data *vc, int init) * vc_{cols,rows}, but we must not set those if we are only * resizing the console. */ - if (!init) { + if (init) { vc->vc_cols = new_cols; vc->vc_rows = new_rows; - } + } else + vc_resize(vc, new_cols, new_rows); if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h index 75be5ce53dc..e233444cda6 100644 --- a/drivers/video/console/fbcon_rotate.h +++ b/drivers/video/console/fbcon_rotate.h @@ -45,7 +45,7 @@ static inline void rotate_ud(const char *in, char *out, u32 width, u32 height) width = (width + 7) & ~7; for (i = 0; i < height; i++) { - for (j = 0; j < width; j++) { + for (j = 0; j < width - shift; j++) { if (pattern_test_bit(j, i, width, in)) pattern_set_bit(width - (1 + j + shift), height - (1 + i), diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index ef7870f5ea0..857b3668b3b 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -957,9 +957,14 @@ static int __devinit sticore_pci_init(struct pci_dev *pd, #ifdef CONFIG_PCI unsigned long fb_base, rom_base; unsigned int fb_len, rom_len; + int err; struct sti_struct *sti; - pci_enable_device(pd); + err = pci_enable_device(pd); + if (err < 0) { + dev_err(&pd->dev, "Cannot enable PCI device\n"); + return err; + } fb_base = pci_resource_start(pd, 0); fb_len = pci_resource_len(pd, 0); @@ -1048,7 +1053,7 @@ static void __devinit sti_init_roms(void) /* Register drivers for native & PCI cards */ register_parisc_driver(&pa_sti_driver); - pci_register_driver(&pci_sti_driver); + WARN_ON(pci_register_driver(&pci_sti_driver)); /* if we didn't find the given default sti, take the first one */ if (!default_sti) diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index f8a09bf8d0c..a85c818be94 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -16,7 +16,6 @@ #include <linux/compat.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/smp_lock.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/slab.h> @@ -1310,8 +1309,6 @@ static long fb_compat_ioctl(struct 
file *file, unsigned int cmd, static int fb_mmap(struct file *file, struct vm_area_struct * vma) -__acquires(&info->lock) -__releases(&info->lock) { int fbidx = iminor(file->f_path.dentry->d_inode); struct fb_info *info = registered_fb[fbidx]; @@ -1325,16 +1322,14 @@ __releases(&info->lock) off = vma->vm_pgoff << PAGE_SHIFT; if (!fb) return -ENODEV; + mutex_lock(&info->mm_lock); if (fb->fb_mmap) { int res; - mutex_lock(&info->lock); res = fb->fb_mmap(info, vma); - mutex_unlock(&info->lock); + mutex_unlock(&info->mm_lock); return res; } - mutex_lock(&info->lock); - /* frame buffer memory */ start = info->fix.smem_start; len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); @@ -1342,13 +1337,13 @@ __releases(&info->lock) /* memory mapped io */ off -= len; if (info->var.accel_flags) { - mutex_unlock(&info->lock); + mutex_unlock(&info->mm_lock); return -EINVAL; } start = info->fix.mmio_start; len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); } - mutex_unlock(&info->lock); + mutex_unlock(&info->mm_lock); start &= PAGE_MASK; if ((vma->vm_end - vma->vm_start + off) > len) return -EINVAL; @@ -1518,6 +1513,7 @@ register_framebuffer(struct fb_info *fb_info) break; fb_info->node = i; mutex_init(&fb_info->lock); + mutex_init(&fb_info->mm_lock); fb_info->dev = device_create(fb_class, fb_info->device, MKDEV(FB_MAJOR, i), NULL, "fb%d", i); diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 5c1a2c01778..9ae9cd32bd0 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c @@ -256,8 +256,8 @@ static void fix_edid(unsigned char *edid, int fix) static int edid_checksum(unsigned char *edid) { - unsigned char i, csum = 0, all_null = 0; - int err = 0, fix = check_edid(edid); + unsigned char csum = 0, all_null = 0; + int i, err = 0, fix = check_edid(edid); if (fix) fix_edid(edid, fix); diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index f153c581cbd..72d68b3dc47 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info) static int map_video_memory(struct fb_info *info) { phys_addr_t phys; + u32 smem_len = info->fix.line_length * info->var.yres_virtual; pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual); pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual); pr_debug("info->fix.line_length = %d\n", info->fix.line_length); + pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len); - info->fix.smem_len = info->fix.line_length * info->var.yres_virtual; - pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len); - info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys); + info->screen_base = fsl_diu_alloc(smem_len, &phys); if (info->screen_base == NULL) { printk(KERN_ERR "Unable to allocate fb memory\n"); return -ENOMEM; } + mutex_lock(&info->mm_lock); info->fix.smem_start = (unsigned long) phys; + info->fix.smem_len = smem_len; + mutex_unlock(&info->mm_lock); info->screen_size = info->fix.smem_len; pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n", - info->fix.smem_start, - info->fix.smem_len); + info->fix.smem_start, info->fix.smem_len); pr_debug("screen base %p\n", info->screen_base); return 0; @@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info) static void unmap_video_memory(struct fb_info *info) { fsl_diu_free(info->screen_base, info->fix.smem_len); + mutex_lock(&info->mm_lock); info->screen_base = NULL; info->fix.smem_start = 0; info->fix.smem_len = 0; + mutex_unlock(&info->mm_lock); } /* @@ -1219,12 +1223,6 @@ 
static int __devinit install_fb(struct fb_info *info) return -EINVAL; } - if (fsl_diu_set_par(info)) { - printk(KERN_ERR "fb_set_par failed"); - fb_dealloc_cmap(&info->cmap); - return -EINVAL; - } - if (register_framebuffer(info) < 0) { printk(KERN_ERR "register_framebuffer failed"); unmap_video_memory(info); diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index 020db7fc915..e7116a6d82d 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c @@ -44,9 +44,6 @@ static struct fb_fix_screeninfo hitfb_fix __initdata = { .accel = FB_ACCEL_NONE, }; -static u32 pseudo_palette[16]; -static struct fb_info fb_info; - static inline void hitfb_accel_wait(void) { while (fb_readw(HD64461_GRCFGR) & HD64461_GRCFGR_ACCSTATUS) ; @@ -331,6 +328,8 @@ static struct fb_ops hitfb_ops = { static int __init hitfb_probe(struct platform_device *dev) { unsigned short lcdclor, ldr3, ldvndr; + struct fb_info *info; + int ret; if (fb_get_options("hitfb", NULL)) return -ENODEV; @@ -384,32 +383,53 @@ static int __init hitfb_probe(struct platform_device *dev) break; } - fb_info.fbops = &hitfb_ops; - fb_info.var = hitfb_var; - fb_info.fix = hitfb_fix; - fb_info.pseudo_palette = pseudo_palette; - fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | + info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev); + if (unlikely(!info)) + return -ENOMEM; + + info->fbops = &hitfb_ops; + info->var = hitfb_var; + info->fix = hitfb_fix; + info->pseudo_palette = info->par; + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA; - fb_info.screen_base = (void *)hitfb_fix.smem_start; + info->screen_base = (void *)hitfb_fix.smem_start; - fb_alloc_cmap(&fb_info.cmap, 256, 0); + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (unlikely(ret < 0)) + goto err_fb; - if (register_framebuffer(&fb_info) < 0) - return -EINVAL; + ret = register_framebuffer(info); + if (unlikely(ret < 0)) + goto err; + + platform_set_drvdata(dev, info); printk(KERN_INFO "fb%d: %s frame buffer device\n", - fb_info.node, fb_info.fix.id); + info->node, info->fix.id); + return 0; + +err: + fb_dealloc_cmap(&info->cmap); +err_fb: + framebuffer_release(info); + return ret; } static int __exit hitfb_remove(struct platform_device *dev) { - return unregister_framebuffer(&fb_info); + struct fb_info *info = platform_get_drvdata(dev); + + unregister_framebuffer(info); + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + + return 0; } -#ifdef CONFIG_PM -static int hitfb_suspend(struct platform_device *dev, pm_message_t state) +static int hitfb_suspend(struct device *dev) { u16 v; @@ -421,7 +441,7 @@ static int hitfb_suspend(struct platform_device *dev, pm_message_t state) return 0; } -static int hitfb_resume(struct platform_device *dev) +static int hitfb_resume(struct device *dev) { u16 v; @@ -435,17 +455,19 @@ static int hitfb_resume(struct platform_device *dev) return 0; } -#endif + +static struct dev_pm_ops hitfb_dev_pm_ops = { + .suspend = hitfb_suspend, + .resume = hitfb_resume, +}; static struct platform_driver hitfb_driver = { .probe = hitfb_probe, .remove = __exit_p(hitfb_remove), -#ifdef CONFIG_PM - .suspend = hitfb_suspend, - .resume = hitfb_resume, -#endif .driver = { .name = "hitfb", + .owner = THIS_MODULE, + .pm = &hitfb_dev_pm_ops, }, }; diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index 2e940199fc8..5743ea25e81 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c @@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo 
*fix, struct fb_info *info) memset(fix, 0, sizeof(struct fb_fix_screeninfo)); strcpy(fix->id, "I810"); + mutex_lock(&info->mm_lock); fix->smem_start = par->fb.physical; fix->smem_len = par->fb.size; + mutex_unlock(&info->mm_lock); fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->xpanstep = 8; @@ -2058,8 +2060,7 @@ static int __devinit i810fb_init_pci (struct pci_dev *dev, fb_var_to_videomode(&mode, &info->var); fb_add_videomode(&mode, &info->modelist); - encode_fix(&info->fix, info); - + i810fb_init_ringbuffer(info); err = register_framebuffer(info); diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c index 0ce3b0a8979..a74e5da17aa 100644 --- a/drivers/video/matrox/matroxfb_DAC1064.c +++ b/drivers/video/matrox/matroxfb_DAC1064.c @@ -454,9 +454,9 @@ static void DAC1064_restore_2(WPMINFO2) { dprintk(KERN_DEBUG "DAC1064regs "); for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) { dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], ACCESS_FBINFO(hw).DACreg[i]); - if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... "); + if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... "); } - dprintk("\n" KERN_DEBUG "DAC1064clk "); + dprintk(KERN_DEBUG "DAC1064clk "); for (i = 0; i < 6; i++) dprintk("C%02X=%02X ", i, ACCESS_FBINFO(hw).DACclk[i]); dprintk("\n"); diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c index 13524821e24..4e825112a60 100644 --- a/drivers/video/matrox/matroxfb_Ti3026.c +++ b/drivers/video/matrox/matroxfb_Ti3026.c @@ -651,9 +651,9 @@ static void Ti3026_restore(WPMINFO2) { dprintk(KERN_DEBUG "3026DACregs "); for (i = 0; i < 21; i++) { dprintk("R%02X=%02X ", DACseq[i], hw->DACreg[i]); - if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... "); + if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... "); } - dprintk("\n" KERN_DEBUG "DACclk "); + dprintk(KERN_DEBUG "DACclk "); for (i = 0; i < 6; i++) dprintk("C%02X=%02X ", i, hw->DACclk[i]); dprintk("\n"); diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c index 8e7a275df50..0c1049b308b 100644 --- a/drivers/video/matrox/matroxfb_base.c +++ b/drivers/video/matrox/matroxfb_base.c @@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2) struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix; DBG(__func__) + mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock); fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes); fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes); + mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock); } static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) @@ -1874,7 +1876,6 @@ static int initMatrox2(WPMINFO struct board* b){ } matroxfb_init_fix(PMINFO2); ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase)); - matroxfb_update_fix(PMINFO2); /* Normalize values (namely yres_virtual) */ matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon)); /* And put it into "current" var. 
Do NOT program hardware yet, or we'll not take over @@ -2081,6 +2082,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm spin_lock_init(&ACCESS_FBINFO(lock.accel)); init_rwsem(&ACCESS_FBINFO(crtc2.lock)); init_rwsem(&ACCESS_FBINFO(altout.lock)); + mutex_init(&ACCESS_FBINFO(fbcon).mm_lock); ACCESS_FBINFO(irq_flags) = 0; init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait)); init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait)); diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c index 7ac4c5f6145..ebcb5c6b496 100644 --- a/drivers/video/matrox/matroxfb_crtc2.c +++ b/drivers/video/matrox/matroxfb_crtc2.c @@ -289,7 +289,12 @@ static int matroxfb_dh_release(struct fb_info* info, int user) { #undef m2info } -static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) { +/* + * This function is called before the register_framebuffer so + * no locking is needed. + */ +static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) +{ struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; strcpy(fix->id, "MATROX DH"); diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index b7af5256e88..054ef29be47 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -669,7 +669,8 @@ static uint32_t bpp_to_pixfmt(int bpp) } static int mx3fb_blank(int blank, struct fb_info *fbi); -static int mx3fb_map_video_memory(struct fb_info *fbi); +static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, + bool lock); static int mx3fb_unmap_video_memory(struct fb_info *fbi); /** @@ -711,12 +712,7 @@ static void mx3fb_dma_done(void *arg) complete(&mx3_fbi->flip_cmpl); } -/** - * mx3fb_set_par() - set framebuffer parameters and change the operating mode. - * @fbi: framebuffer information pointer. - * @return: 0 on success or negative error code on failure. - */ -static int mx3fb_set_par(struct fb_info *fbi) +static int __set_par(struct fb_info *fbi, bool lock) { u32 mem_len; struct ipu_di_signal_cfg sig_cfg; @@ -727,10 +723,6 @@ static int mx3fb_set_par(struct fb_info *fbi) struct idmac_video_param *video = &ichan->params.video; struct scatterlist *sg = mx3_fbi->sg; - dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); - - mutex_lock(&mx3_fbi->mutex); - /* Total cleanup */ if (mx3_fbi->txd) sdc_disable_channel(mx3_fbi); @@ -742,11 +734,8 @@ static int mx3fb_set_par(struct fb_info *fbi) if (fbi->fix.smem_start) mx3fb_unmap_video_memory(fbi); - fbi->fix.smem_len = mem_len; - if (mx3fb_map_video_memory(fbi) < 0) { - mutex_unlock(&mx3_fbi->mutex); + if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0) return -ENOMEM; - } } sg_init_table(&sg[0], 1); @@ -792,7 +781,6 @@ static int mx3fb_set_par(struct fb_info *fbi) fbi->var.vsync_len, fbi->var.lower_margin + fbi->var.vsync_len, sig_cfg) != 0) { - mutex_unlock(&mx3_fbi->mutex); dev_err(fbi->device, "mx3fb: Error initializing panel.\n"); return -EINVAL; @@ -811,9 +799,30 @@ static int mx3fb_set_par(struct fb_info *fbi) if (mx3_fbi->blank == FB_BLANK_UNBLANK) sdc_enable_channel(mx3_fbi); + return 0; +} + +/** + * mx3fb_set_par() - set framebuffer parameters and change the operating mode. + * @fbi: framebuffer information pointer. + * @return: 0 on success or negative error code on failure. 
+ */ +static int mx3fb_set_par(struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; + struct idmac_channel *ichan = mx3_fbi->idmac_channel; + int ret; + + dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); + + mutex_lock(&mx3_fbi->mutex); + + ret = __set_par(fbi, true); + mutex_unlock(&mx3_fbi->mutex); - return 0; + return ret; } /** @@ -967,21 +976,11 @@ static int mx3fb_setcolreg(unsigned int regno, unsigned int red, return ret; } -/** - * mx3fb_blank() - blank the display. - */ -static int mx3fb_blank(int blank, struct fb_info *fbi) +static void __blank(int blank, struct fb_info *fbi) { struct mx3fb_info *mx3_fbi = fbi->par; struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; - dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, - blank, fbi->screen_base, fbi->fix.smem_len); - - if (mx3_fbi->blank == blank) - return 0; - - mutex_lock(&mx3_fbi->mutex); mx3_fbi->blank = blank; switch (blank) { @@ -1000,6 +999,23 @@ static int mx3fb_blank(int blank, struct fb_info *fbi) sdc_set_brightness(mx3fb, mx3fb->backlight_level); break; } +} + +/** + * mx3fb_blank() - blank the display. + */ +static int mx3fb_blank(int blank, struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + + dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, + blank, fbi->screen_base, fbi->fix.smem_len); + + if (mx3_fbi->blank == blank) + return 0; + + mutex_lock(&mx3_fbi->mutex); + __blank(blank, fbi); mutex_unlock(&mx3_fbi->mutex); return 0; @@ -1198,6 +1214,8 @@ static int mx3fb_resume(struct platform_device *pdev) /** * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. * @fbi: framebuffer information pointer + * @mem_len: length of mapped memory + * @lock: do not lock during initialisation * @return: Error code indicating success or failure * * This buffer is remapped into a non-cached, non-buffered, memory region to @@ -1205,23 +1223,29 @@ static int mx3fb_resume(struct platform_device *pdev) * area is remapped, all virtual memory access to the video memory should occur * at the new region. 
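 *
 * (A note on the new @lock flag: when true, the smem_start/smem_len
 * update below is done under fbi->mm_lock, i.e.
 *
 *	if (lock)
 *		mutex_lock(&fbi->mm_lock);
 *	fbi->fix.smem_start = addr;
 *	fbi->fix.smem_len = mem_len;
 *	if (lock)
 *		mutex_unlock(&fbi->mm_lock);
 *
 * so a concurrent fb_mmap(), which reads those fields under the same
 * mutex, always sees a consistent pair.  The only caller passing false
 * is the probe path via __set_par(fbi, false), which runs before
 * register_framebuffer() and therefore cannot race with mmap.)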
*/ -static int mx3fb_map_video_memory(struct fb_info *fbi) +static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, + bool lock) { int retval = 0; dma_addr_t addr; fbi->screen_base = dma_alloc_writecombine(fbi->device, - fbi->fix.smem_len, + mem_len, &addr, GFP_DMA); if (!fbi->screen_base) { dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", - fbi->fix.smem_len); + mem_len); retval = -EBUSY; goto err0; } + if (lock) + mutex_lock(&fbi->mm_lock); fbi->fix.smem_start = addr; + fbi->fix.smem_len = mem_len; + if (lock) + mutex_unlock(&fbi->mm_lock); dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); @@ -1251,8 +1275,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi) fbi->screen_base, fbi->fix.smem_start); fbi->screen_base = 0; + mutex_lock(&fbi->mm_lock); fbi->fix.smem_start = 0; fbi->fix.smem_len = 0; + mutex_unlock(&fbi->mm_lock); return 0; } @@ -1360,11 +1386,11 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) init_completion(&mx3fbi->flip_cmpl); disable_irq(ichan->eof_irq); dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); - ret = mx3fb_set_par(fbi); + ret = __set_par(fbi, false); if (ret < 0) goto esetpar; - mx3fb_blank(FB_BLANK_UNBLANK, fbi); + __blank(FB_BLANK_UNBLANK, fbi); dev_info(dev, "registered, using mode %s\n", fb_mode); diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 060d72fe57c..8862233d57b 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi) rg = &plane->fbdev->mem_desc.region[plane->idx]; fbi->screen_base = rg->vaddr; + mutex_lock(&fbi->mm_lock); fix->smem_start = rg->paddr; fix->smem_len = rg->size; + mutex_unlock(&fbi->mm_lock); fix->type = FB_TYPE_PACKED_PIXELS; bpp = var->bits_per_pixel; @@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) * plane memory is dealloce'd, the other * screen parameters in var / fix are invalid. 
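	 *
	 * (Why the mm_lock matters here: fb_mmap() now samples smem_start
	 * and smem_len under the same mutex, so clearing them under
	 * fbi->mm_lock prevents a torn view such as:
	 *
	 *	CPU0 (omapfb_setup_mem)		CPU1 (fb_mmap)
	 *	fix.smem_start = 0;
	 *					start = fix.smem_start;  now 0
	 *					len = fix.smem_len;      still stale
	 *	fix.smem_len = 0;
	 *
	 * With both sides holding mm_lock the mapping code sees either the
	 * old region or the fully cleared one, never a mixture.)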
*/ + mutex_lock(&fbi->mm_lock); fbi->fix.smem_start = 0; fbi->fix.smem_len = 0; + mutex_unlock(&fbi->mm_lock); } } } @@ -1250,7 +1254,7 @@ static struct fb_ops omapfb_ops = { static ssize_t omapfb_show_caps_num(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); int plane; size_t size; struct omapfb_caps caps; @@ -1270,7 +1274,7 @@ static ssize_t omapfb_show_caps_num(struct device *dev, static ssize_t omapfb_show_caps_text(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); int i; struct omapfb_caps caps; int plane; @@ -1317,7 +1321,7 @@ static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL); static ssize_t omapfb_show_panel_name(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name); } @@ -1326,7 +1330,7 @@ static ssize_t omapfb_show_bklight_level(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); int r; if (fbdev->panel->get_bklight_level) { @@ -1341,7 +1345,7 @@ static ssize_t omapfb_store_bklight_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); int r; if (fbdev->panel->set_bklight_level) { @@ -1360,7 +1364,7 @@ static ssize_t omapfb_store_bklight_level(struct device *dev, static ssize_t omapfb_show_bklight_max(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); int r; if (fbdev->panel->get_bklight_level) { @@ -1393,7 +1397,7 @@ static struct attribute_group panel_attr_grp = { static ssize_t omapfb_show_ctrl_name(struct device *dev, struct device_attribute *attr, char *buf) { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; + struct omapfb_device *fbdev = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name); } diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c index 03b3670130a..bacfabd9ce1 100644 --- a/drivers/video/platinumfb.c +++ b/drivers/video/platinumfb.c @@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info) offset = 0x10; info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; + mutex_lock(&info->mm_lock); info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; + mutex_unlock(&info->mm_lock); info->fix.visual = (pinfo->cmode == CMODE_8) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 0889d50c328..6506117c134 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb) ofb->video_mem_phys = virt_to_phys(ofb->video_mem); ofb->video_mem_size = size; + mutex_lock(&ofb->fb.mm_lock); ofb->fb.fix.smem_start = ofb->video_mem_phys; ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; + mutex_unlock(&ofb->fb.mm_lock); ofb->fb.screen_base = ofb->video_mem; return 0; } diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 43680e54542..5a72083dc67 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c @@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, /** * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. + * @id: window id. * @sfb: The hardware state. * @pixclock: The pixel clock wanted, in picoseconds. * * Given the specified pixel clock, work out the necessary divider to get * close to the output frequency. */ -static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) +static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk) { + struct s3c_fb_pd_win *win = sfb->pdata->win[id]; unsigned long clk = clk_get_rate(sfb->bus_clk); - unsigned long long tmp; unsigned int result; - tmp = (unsigned long long)clk; - tmp *= pixclk; - - do_div(tmp, 1000000000UL); - result = (unsigned int)tmp / 1000; + pixclk *= win->win_mode.refresh; + result = clk / pixclk; dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", pixclk, clk, result, clk / result); @@ -267,6 +265,7 @@ static int s3c_fb_set_par(struct fb_info *info) struct s3c_fb *sfb = win->parent; void __iomem *regs = sfb->regs; int win_no = win->index; + u32 osdc_data = 0; u32 data; u32 pagewidth; int clkdiv; @@ -302,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info) /* use window 0 as the basis for the lcd output timings */ if (win_no == 0) { - clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); + clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock); data = sfb->pdata->vidcon0; data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); @@ -359,8 +358,6 @@ static int s3c_fb_set_par(struct fb_info *info) data = var->xres * var->yres; - u32 osdc_data = 0; - osdc_data = VIDISD14C_ALPHA1_R(0xf) | VIDISD14C_ALPHA1_G(0xf) | VIDISD14C_ALPHA1_B(0xf); @@ -967,7 +964,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev) struct s3c_fb *sfb = platform_get_drvdata(pdev); int win; - for (win = 0; win <= S3C_FB_MAX_WIN; win++) + for (win = 0; win < S3C_FB_MAX_WIN; win++) if (sfb->windows[win]) s3c_fb_release_win(sfb, sfb->windows[win]); @@ -991,7 +988,7 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state) struct s3c_fb_win *win; int win_no; - for (win_no = S3C_FB_MAX_WIN; win_no >= 0; win_no--) { + for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) { win = sfb->windows[win_no]; if (!win) continue; diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index 653bdfee305..9f6d6e61f0c 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c @@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno, return 0; } -static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info, - unsigned long stride) -{ - memset(fix, 0, sizeof(struct fb_fix_screeninfo)); - strcpy(fix->id, 
"sh7760-lcdc"); - - fix->smem_start = (unsigned long)info->screen_base; - fix->smem_len = info->screen_size; - - fix->line_length = stride; -} - static int sh7760fb_get_color_info(struct device *dev, u16 lddfr, int *bpp, int *gray) { @@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info) iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */ - encode_fix(&info->fix, info, stride); + info->fix.line_length = stride; + sh7760fb_check_var(&info->var, info); sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */ @@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info) info->screen_base = fbmem; info->screen_size = vram; + info->fix.smem_start = (unsigned long)info->screen_base; + info->fix.smem_len = info->screen_size; return 0; } @@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev) info->var.transp.length = 0; info->var.transp.msb_right = 0; + strcpy(info->fix.id, "sh7760-lcdc"); + /* set the DON2 bit now, before cmap allocation, as it will randomize * palette memory. */ diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index f10d2fbeda0..07f22b62563 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -17,6 +17,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> +#include <linux/vmalloc.h> #include <video/sh_mobile_lcdc.h> #include <asm/atomic.h> @@ -30,9 +31,10 @@ struct sh_mobile_lcdc_chan { unsigned long enabled; /* ME and SE in LDCNT2R */ struct sh_mobile_lcdc_chan_cfg cfg; u32 pseudo_palette[PALETTE_NR]; - struct fb_info info; + struct fb_info *info; dma_addr_t dma_handle; struct fb_deferred_io defio; + struct scatterlist *sglist; unsigned long frame_end; wait_queue_head_t frame_end_wait; }; @@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {} static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} #endif +static int sh_mobile_lcdc_sginit(struct fb_info *info, + struct list_head *pagelist) +{ + struct sh_mobile_lcdc_chan *ch = info->par; + unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT; + struct page *page; + int nr_pages = 0; + + sg_init_table(ch->sglist, nr_pages_max); + + list_for_each_entry(page, pagelist, lru) + sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0); + + return nr_pages; +} + static void sh_mobile_lcdc_deferred_io(struct fb_info *info, struct list_head *pagelist) { struct sh_mobile_lcdc_chan *ch = info->par; + unsigned int nr_pages; /* enable clocks before accessing hardware */ sh_mobile_lcdc_clk_on(ch->lcdc); + nr_pages = sh_mobile_lcdc_sginit(info, pagelist); + dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); + /* trigger panel update */ lcdc_write_chan(ch, LDSM2R, 1); + + dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); } static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) @@ -418,22 +442,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* set bpp format in PKF[4:0] */ tmp = lcdc_read_chan(ch, LDDFR); tmp &= ~(0x0001001f); - tmp |= (priv->ch[k].info.var.bits_per_pixel == 16) ? 3 : 0; + tmp |= (ch->info->var.bits_per_pixel == 16) ? 
3 : 0; lcdc_write_chan(ch, LDDFR, tmp); /* point out our frame buffer */ - lcdc_write_chan(ch, LDSA1R, ch->info.fix.smem_start); + lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start); /* set line size */ - lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length); + lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length); /* setup deferred io if SYS bus */ tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; if (ch->ldmt1r_value & (1 << 12) && tmp) { ch->defio.deferred_io = sh_mobile_lcdc_deferred_io; ch->defio.delay = msecs_to_jiffies(tmp); - ch->info.fbdefio = &ch->defio; - fb_deferred_io_init(&ch->info); + ch->info->fbdefio = &ch->defio; + fb_deferred_io_init(ch->info); /* one-shot mode */ lcdc_write_chan(ch, LDSM1R, 1); @@ -457,6 +481,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* tell the board code to enable the panel */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { ch = &priv->ch[k]; + if (!ch->enabled) + continue; + board_cfg = &ch->cfg.board_cfg; if (board_cfg->display_on) board_cfg->display_on(board_cfg->board_data); @@ -474,17 +501,19 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) /* clean up deferred io and ask board code to disable panel */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { ch = &priv->ch[k]; + if (!ch->enabled) + continue; /* deferred io mode: * flush frame, and wait for frame end interrupt * clean up deferred io and enable clock */ - if (ch->info.fbdefio) { + if (ch->info->fbdefio) { ch->frame_end = 0; - schedule_delayed_work(&ch->info.deferred_work, 0); + schedule_delayed_work(&ch->info->deferred_work, 0); wait_event(ch->frame_end_wait, ch->frame_end); - fb_deferred_io_cleanup(&ch->info); - ch->info.fbdefio = NULL; + fb_deferred_io_cleanup(ch->info); + ch->info->fbdefio = NULL; sh_mobile_lcdc_clk_on(priv); } @@ -793,9 +822,16 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1); for (i = 0; i < j; i++) { - info = &priv->ch[i].info; cfg = &priv->ch[i].cfg; + priv->ch[i].info = framebuffer_alloc(0, &pdev->dev); + if (!priv->ch[i].info) { + dev_err(&pdev->dev, "unable to allocate fb_info\n"); + error = -ENOMEM; + break; + } + + info = priv->ch[i].info; info->fbops = &sh_mobile_lcdc_ops; info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres; @@ -846,21 +882,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) } for (i = 0; i < j; i++) { - error = register_framebuffer(&priv->ch[i].info); + struct sh_mobile_lcdc_chan *ch = priv->ch + i; + + info = ch->info; + + if (info->fbdefio) { + priv->ch->sglist = vmalloc(sizeof(struct scatterlist) * + info->fix.smem_len >> PAGE_SHIFT); + if (!priv->ch->sglist) { + dev_err(&pdev->dev, "cannot allocate sglist\n"); + goto err1; + } + } + + error = register_framebuffer(info); if (error < 0) goto err1; - } - for (i = 0; i < j; i++) { - info = &priv->ch[i].info; dev_info(info->dev, "registered %s/%s as %dx%d %dbpp.\n", pdev->name, - (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ? + (ch->cfg.chan == LCDC_CHAN_MAINLCD) ? 
"mainlcd" : "sublcd", - (int) priv->ch[i].cfg.lcd_cfg.xres, - (int) priv->ch[i].cfg.lcd_cfg.yres, - priv->ch[i].cfg.bpp); + (int) ch->cfg.lcd_cfg.xres, + (int) ch->cfg.lcd_cfg.yres, + ch->cfg.bpp); /* deferred io mode: disable clock to save power */ if (info->fbdefio) @@ -881,20 +927,24 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(priv->ch); i++) - if (priv->ch[i].info.dev) - unregister_framebuffer(&priv->ch[i].info); + if (priv->ch[i].info->dev) + unregister_framebuffer(priv->ch[i].info); sh_mobile_lcdc_stop(priv); for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { - info = &priv->ch[i].info; + info = priv->ch[i].info; - if (!info->device) + if (!info || !info->device) continue; + if (priv->ch[i].sglist) + vfree(priv->ch[i].sglist); + dma_free_coherent(&pdev->dev, info->fix.smem_len, info->screen_base, priv->ch[i].dma_handle); fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); } #ifdef CONFIG_HAVE_CLK diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 7072d19080d..4a067f0d0ce 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info) strcpy(fix->id, ivideo->myid); + mutex_lock(&info->mm_lock); fix->smem_start = ivideo->video_base + ivideo->video_offset; fix->smem_len = ivideo->sisfb_mem; + mutex_unlock(&info->mm_lock); fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; @@ -6365,7 +6367,6 @@ error_3: vfree(ivideo->bios_abase); sis_fb_info->fix = ivideo->sisfb_fix; sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset; sis_fb_info->fbops = &sisfb_ops; - sisfb_get_fix(&sis_fb_info->fix, -1, sis_fb_info); sis_fb_info->pseudo_palette = ivideo->pseudo_palette; fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0); diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index eb5d73a0670..924d7946278 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c @@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info) #define SM501_MEMF_ACCEL (8) static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, - unsigned int why, size_t size) + unsigned int why, size_t size, u32 smem_len) { struct sm501fb_par *par; struct fb_info *fbi; @@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, if (ptr > 0) ptr &= ~(PAGE_SIZE - 1); - if (fbi && ptr < fbi->fix.smem_len) + if (fbi && ptr < smem_len) return -ENOMEM; break; @@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, case SM501_MEMF_ACCEL: fbi = inf->fb[HEAD_CRT]; - ptr = fbi ? fbi->fix.smem_len : 0; + ptr = fbi ? 
smem_len : 0; fbi = inf->fb[HEAD_PANEL]; if (fbi) { @@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info, unsigned int mem_type; unsigned int clock_type; unsigned int head_addr; + unsigned int smem_len; dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", __func__, var->xres, var->yres, var->bits_per_pixel, @@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info, /* allocate fb memory within 501 */ info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; - info->fix.smem_len = info->fix.line_length * var->yres_virtual; + smem_len = info->fix.line_length * var->yres_virtual; dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, info->fix.line_length); - if (sm501_alloc_mem(fbi, &par->screen, mem_type, - info->fix.smem_len)) { + if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) { dev_err(fbi->dev, "no memory available\n"); return -ENOMEM; } + mutex_lock(&info->mm_lock); info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; + info->fix.smem_len = smem_len; + mutex_unlock(&info->mm_lock); info->screen_base = fbi->fbmem + par->screen.sm_addr; info->screen_size = info->fix.smem_len; @@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info) if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { /* the head is displaying panel data... */ - sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0); + sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0, + info->fix.smem_len); goto out_update; } @@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base) par->cursor_regs = info->regs + reg_base; - ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); + ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024, + fbi->fix.smem_len); if (ret < 0) return ret; @@ -1535,9 +1540,6 @@ static int sm501fb_init_fb(struct fb_info *fb, if (ret) dev_err(info->dev, "check_var() failed on initial setup?\n"); - /* ensure we've activated our new configuration */ - (fb->fbops->fb_set_par)(fb); - return 0; } @@ -1619,6 +1621,8 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info, if (!fbi) return 0; + mutex_init(&info->fb[head]->mm_lock); + ret = sm501fb_init_fb(info->fb[head], head, drvname); if (ret) { dev_err(info->dev, "cannot initialise fb %s\n", drvname); diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c index eec9dcb7f59..6120f0c526f 100644 --- a/drivers/video/stifb.c +++ b/drivers/video/stifb.c @@ -1115,10 +1115,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) if the device name contains the string "DX" and tell the user how to reconfigure the card. 
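	   A side note on the printk() below: a KERN_* level marker is only
	   honoured at the very start of a printk() format string, so the
	   old '"...\n" KERN_WARNING "..."' construct printed the level
	   prefix literally in the middle of the message.  The corrected
	   form, as used here and in the amba-clcd and matroxfb hunks above,
	   states the level once and keeps the newlines inside the string:

		printk(KERN_WARNING
		       "WARNING: first line of the message\n"
		       "WARNING: second line of the message\n");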
*/ if (strstr(sti->outptr.dev_name, "DX")) { - printk(KERN_WARNING "WARNING: stifb framebuffer driver does not " - "support '%s' in double-buffer mode.\n" - KERN_WARNING "WARNING: Please disable the double-buffer mode " - "in IPL menu (the PARISC-BIOS).\n", + printk(KERN_WARNING +"WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n" +"WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n", sti->outptr.dev_name); goto out_err0; } diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c index fcd53ceb88f..c8960003f47 100644 --- a/drivers/video/via/hw.c +++ b/drivers/video/via/hw.c @@ -2407,14 +2407,14 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp, viafb_dvi_set_mode(viafb_get_mode_index (viaparinfo->tmds_setting_info->h_active, viaparinfo->tmds_setting_info-> - v_active, 1), + v_active), video_bpp1, viaparinfo-> tmds_setting_info->iga_path); } else { viafb_dvi_set_mode(viafb_get_mode_index (viaparinfo->tmds_setting_info->h_active, viaparinfo-> - tmds_setting_info->v_active, 0), + tmds_setting_info->v_active), video_bpp, viaparinfo-> tmds_setting_info->iga_path); } diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c index 6c7290a6a44..78c6b338794 100644 --- a/drivers/video/via/lcd.c +++ b/drivers/video/via/lcd.c @@ -580,10 +580,7 @@ static void load_lcd_k400_patch_tbl(int set_hres, int set_vres, int reg_num = 0; struct io_reg *lcd_patch_reg = NULL; - if (viaparinfo->lvds_setting_info->iga_path == IGA2) - vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); - else - vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); + vmode_index = viafb_get_mode_index(set_hres, set_vres); switch (panel_id) { /* LCD 800x600 */ case LCD_PANEL_ID1_800X600: @@ -761,10 +758,7 @@ static void load_lcd_p880_patch_tbl(int set_hres, int set_vres, int reg_num = 0; struct io_reg *lcd_patch_reg = NULL; - if (viaparinfo->lvds_setting_info->iga_path == IGA2) - vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); - else - vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); + vmode_index = viafb_get_mode_index(set_hres, set_vres); switch (panel_id) { case LCD_PANEL_ID5_1400X1050: @@ -832,10 +826,7 @@ static void load_lcd_patch_regs(int set_hres, int set_vres, { int vmode_index; - if (viaparinfo->lvds_setting_info->iga_path == IGA2) - vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); - else - vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); + vmode_index = viafb_get_mode_index(set_hres, set_vres); viafb_unlock_crt(); diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index a0fec298216..72833f3334b 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c @@ -32,7 +32,6 @@ static u32 pseudo_pal[17]; /* video mode */ static char *viafb_mode = "640x480"; static char *viafb_mode1 = "640x480"; -static int viafb_resMode = VIA_RES_640X480; /* Added for specifying active devices.*/ char *viafb_active_dev = ""; @@ -56,47 +55,47 @@ static void viafb_get_video_device(u32 *video_dev_info); /* Mode information */ static const struct viafb_modeinfo viafb_modentry[] = { - {480, 640, VIA_RES_480X640, "480x640"}, - {640, 480, VIA_RES_640X480, "640x480"}, - {800, 480, VIA_RES_800X480, "800x480"}, - {800, 600, VIA_RES_800X600, "800x600"}, - {1024, 768, VIA_RES_1024X768, "1024x768"}, - {1152, 864, VIA_RES_1152X864, "1152x864"}, - {1280, 1024, VIA_RES_1280X1024, "1280x1024"}, - {1600, 1200, VIA_RES_1600X1200, "1600x1200"}, - {1440, 1050, VIA_RES_1440X1050, 
"1440x1050"}, - {1280, 768, VIA_RES_1280X768, "1280x768"}, - {1280, 800, VIA_RES_1280X800, "1280x800"}, - {1280, 960, VIA_RES_1280X960, "1280x960"}, - {1920, 1440, VIA_RES_1920X1440, "1920x1440"}, - {848, 480, VIA_RES_848X480, "848x480"}, - {1400, 1050, VIA_RES_1400X1050, "1400x1050"}, - {720, 480, VIA_RES_720X480, "720x480"}, - {720, 576, VIA_RES_720X576, "720x576"}, - {1024, 512, VIA_RES_1024X512, "1024x512"}, - {1024, 576, VIA_RES_1024X576, "1024x576"}, - {1024, 600, VIA_RES_1024X600, "1024x600"}, - {1280, 720, VIA_RES_1280X720, "1280x720"}, - {1920, 1080, VIA_RES_1920X1080, "1920x1080"}, - {1366, 768, VIA_RES_1368X768, "1368x768"}, - {1680, 1050, VIA_RES_1680X1050, "1680x1050"}, - {960, 600, VIA_RES_960X600, "960x600"}, - {1000, 600, VIA_RES_1000X600, "1000x600"}, - {1024, 576, VIA_RES_1024X576, "1024x576"}, - {1024, 600, VIA_RES_1024X600, "1024x600"}, - {1088, 612, VIA_RES_1088X612, "1088x612"}, - {1152, 720, VIA_RES_1152X720, "1152x720"}, - {1200, 720, VIA_RES_1200X720, "1200x720"}, - {1280, 600, VIA_RES_1280X600, "1280x600"}, - {1360, 768, VIA_RES_1360X768, "1360x768"}, - {1440, 900, VIA_RES_1440X900, "1440x900"}, - {1600, 900, VIA_RES_1600X900, "1600x900"}, - {1600, 1024, VIA_RES_1600X1024, "1600x1024"}, - {1792, 1344, VIA_RES_1792X1344, "1792x1344"}, - {1856, 1392, VIA_RES_1856X1392, "1856x1392"}, - {1920, 1200, VIA_RES_1920X1200, "1920x1200"}, - {2048, 1536, VIA_RES_2048X1536, "2048x1536"}, - {0, 0, VIA_RES_INVALID, "640x480"} + {480, 640, VIA_RES_480X640}, + {640, 480, VIA_RES_640X480}, + {800, 480, VIA_RES_800X480}, + {800, 600, VIA_RES_800X600}, + {1024, 768, VIA_RES_1024X768}, + {1152, 864, VIA_RES_1152X864}, + {1280, 1024, VIA_RES_1280X1024}, + {1600, 1200, VIA_RES_1600X1200}, + {1440, 1050, VIA_RES_1440X1050}, + {1280, 768, VIA_RES_1280X768,}, + {1280, 800, VIA_RES_1280X800}, + {1280, 960, VIA_RES_1280X960}, + {1920, 1440, VIA_RES_1920X1440}, + {848, 480, VIA_RES_848X480}, + {1400, 1050, VIA_RES_1400X1050}, + {720, 480, VIA_RES_720X480}, + {720, 576, VIA_RES_720X576}, + {1024, 512, VIA_RES_1024X512}, + {1024, 576, VIA_RES_1024X576}, + {1024, 600, VIA_RES_1024X600}, + {1280, 720, VIA_RES_1280X720}, + {1920, 1080, VIA_RES_1920X1080}, + {1366, 768, VIA_RES_1368X768}, + {1680, 1050, VIA_RES_1680X1050}, + {960, 600, VIA_RES_960X600}, + {1000, 600, VIA_RES_1000X600}, + {1024, 576, VIA_RES_1024X576}, + {1024, 600, VIA_RES_1024X600}, + {1088, 612, VIA_RES_1088X612}, + {1152, 720, VIA_RES_1152X720}, + {1200, 720, VIA_RES_1200X720}, + {1280, 600, VIA_RES_1280X600}, + {1360, 768, VIA_RES_1360X768}, + {1440, 900, VIA_RES_1440X900}, + {1600, 900, VIA_RES_1600X900}, + {1600, 1024, VIA_RES_1600X1024}, + {1792, 1344, VIA_RES_1792X1344}, + {1856, 1392, VIA_RES_1856X1392}, + {1920, 1200, VIA_RES_1920X1200}, + {2048, 1536, VIA_RES_2048X1536}, + {0, 0, VIA_RES_INVALID} }; static struct fb_ops viafb_ops; @@ -177,7 +176,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var, if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) return -EINVAL; - vmode_index = viafb_get_mode_index(var->xres, var->yres, 0); + vmode_index = viafb_get_mode_index(var->xres, var->yres); if (vmode_index == VIA_RES_INVALID) { DEBUG_MSG(KERN_INFO "viafb: Mode %dx%dx%d not supported!!\n", @@ -233,14 +232,14 @@ static int viafb_set_par(struct fb_info *info) viafb_update_device_setting(info->var.xres, info->var.yres, info->var.bits_per_pixel, viafb_refresh, 0); - vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres, 0); + vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres); 
if (viafb_SAMM_ON == 1) { DEBUG_MSG(KERN_INFO "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", viafb_second_xres, viafb_second_yres, viafb_bpp1); vmode_index1 = viafb_get_mode_index(viafb_second_xres, - viafb_second_yres, 1); + viafb_second_yres); DEBUG_MSG(KERN_INFO "->viafb_SAMM_ON: index=%d\n", vmode_index1); @@ -1262,7 +1261,7 @@ static int viafb_sync(struct fb_info *info) return 0; } -int viafb_get_mode_index(int hres, int vres, int flag) +int viafb_get_mode_index(int hres, int vres) { u32 i; DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n"); @@ -1272,13 +1271,7 @@ int viafb_get_mode_index(int hres, int vres, int flag) viafb_modentry[i].yres == vres) break; - viafb_resMode = viafb_modentry[i].mode_index; - if (flag) - viafb_mode1 = viafb_modentry[i].mode_res; - else - viafb_mode = viafb_modentry[i].mode_res; - - return viafb_resMode; + return viafb_modentry[i].mode_index; } static void check_available_device_to_enable(int device_id) @@ -2199,7 +2192,7 @@ static int __devinit via_pci_probe(void) strict_strtoul(tmpc, 0, &default_xres); strict_strtoul(tmpm, 0, &default_yres); - vmode_index = viafb_get_mode_index(default_xres, default_yres, 0); + vmode_index = viafb_get_mode_index(default_xres, default_yres); DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index); if (viafb_SAMM_ON == 1) { diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h index a4158e87287..227b000feb3 100644 --- a/drivers/video/via/viafbdev.h +++ b/drivers/video/via/viafbdev.h @@ -81,7 +81,6 @@ struct viafb_modeinfo { u32 xres; u32 yres; int mode_index; - char *mode_res; }; extern unsigned int viafb_second_virtual_yres; extern unsigned int viafb_second_virtual_xres; @@ -102,7 +101,7 @@ extern int strict_strtoul(const char *cp, unsigned int base, void viafb_memory_pitch_patch(struct fb_info *info); void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh, int mode_index); -int viafb_get_mode_index(int hres, int vres, int flag); +int viafb_get_mode_index(int hres, int vres); u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info, u8 index); diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index d0674f1e3f1..2376f688ec8 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info) info->fix.ywrapstep = 0; info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; + mutex_lock(&info->mm_lock); if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { par->extmem_active = 1; info->fix.smem_len = par->mach->mem->size+1; @@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info) par->extmem_active = 0; info->fix.smem_len = MEM_INT_SIZE+1; } + mutex_unlock(&info->mm_lock); w100fb_activate_var(par); } @@ -746,8 +748,6 @@ int __init w100fb_probe(struct platform_device *pdev) goto out; } - w100fb_set_par(info); - if (register_framebuffer(info) < 0) { err = -EINVAL; goto out; diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index 15502d5e364..54cd9161017 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c @@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, xenfb_init_shared_page(info, fb_info); + ret = xenfb_connect_backend(dev, info); + if (ret < 0) + goto error; + ret = register_framebuffer(fb_info); if (ret) { fb_deferred_io_cleanup(fb_info); @@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, } info->fb_info = 
fb_info; - ret = xenfb_connect_backend(dev, info); - if (ret < 0) - goto error; - xenfb_make_preferred_console(); return 0; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 193c8f0e5cc..248e00ec4dc 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -52,8 +52,10 @@ struct virtio_pci_device char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; - /* Vectors allocated */ + /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; + /* Whether we have vector per vq */ + bool per_vq_vectors; }; /* Constants for MSI-X */ @@ -258,7 +260,6 @@ static void vp_free_vectors(struct virtio_device *vdev) for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); - vp_dev->msix_used_vectors = 0; if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ @@ -267,80 +268,77 @@ static void vp_free_vectors(struct virtio_device *vdev) /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - vp_dev->msix_enabled = 0; pci_disable_msix(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; + vp_dev->msix_vectors = 0; } -} -static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, - int *options, int noptions) -{ - int i; - for (i = 0; i < noptions; ++i) - if (!pci_enable_msix(dev, entries, options[i])) - return options[i]; - return -EBUSY; + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_entries); + vp_dev->msix_entries = NULL; } -static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) +static int vp_request_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; - /* We want at most one vector per queue and one for config changes. - * Fallback to separate vectors for config and a shared for queues. - * Finally fall back to regular interrupts. 
*/ - int options[] = { max_vqs + 1, 2 }; - int nvectors = max(options[0], options[1]); + + if (!nvectors) { + /* Can't allocate MSI-X vectors, use regular interrupt */ + vp_dev->msix_vectors = 0; + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, + IRQF_SHARED, name, vp_dev); + if (err) + return err; + vp_dev->intx_enabled = 1; + return 0; + } vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) - goto error_entries; + goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) - goto error_names; + goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; - err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, - options, ARRAY_SIZE(options)); - if (err < 0) { - /* Can't allocate enough MSI-X vectors, use regular interrupt */ - vp_dev->msix_vectors = 0; - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, - IRQF_SHARED, name, vp_dev); - if (err) - goto error_irq; - vp_dev->intx_enabled = 1; - } else { - vp_dev->msix_vectors = err; - vp_dev->msix_enabled = 1; - - /* Set the vector used for configuration */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-config", name); - err = request_irq(vp_dev->msix_entries[v].vector, - vp_config_changed, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error_irq; - ++vp_dev->msix_used_vectors; - - iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - /* Verify we had enough resources to assign the vector */ - v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - if (v == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto error_irq; - } + err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); + if (err > 0) + err = -ENOSPC; + if (err) + goto error; + vp_dev->msix_vectors = nvectors; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; } - if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { + if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, @@ -349,28 +347,25 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) - goto error_irq; + goto error; ++vp_dev->msix_used_vectors; } return 0; -error_irq: +error: vp_free_vectors(vdev); - kfree(vp_dev->msix_names); -error_names: - kfree(vp_dev->msix_entries); -error_entries: return err; } static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, + u16 vector) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; - u16 num, vector; + u16 num; int err; /* Select the queue we're interested in */ @@ -389,7 +384,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device 
*vdev, unsigned index, info->queue_index = index; info->num = num; - info->vector = VIRTIO_MSI_NO_VECTOR; + info->vector = vector; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); @@ -413,22 +408,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, vq->priv = info; info->vq = vq; - /* allocate per-vq vector if available and necessary */ - if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) { - vector = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, - "%s-%s", dev_name(&vp_dev->vdev.dev), name); - err = request_irq(vp_dev->msix_entries[vector].vector, - vring_interrupt, 0, - vp_dev->msix_names[vector], vq); - if (err) - goto out_request_irq; - info->vector = vector; - ++vp_dev->msix_used_vectors; - } else - vector = VP_MSIX_VQ_VECTOR; - - if (callback && vp_dev->msix_enabled) { + if (vector != VIRTIO_MSI_NO_VECTOR) { iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (vector == VIRTIO_MSI_NO_VECTOR) { @@ -444,11 +424,6 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, return vq; out_assign: - if (info->vector != VIRTIO_MSI_NO_VECTOR) { - free_irq(vp_dev->msix_entries[info->vector].vector, vq); - --vp_dev->msix_used_vectors; - } -out_request_irq: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); @@ -462,12 +437,13 @@ static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; - unsigned long size; + unsigned long flags, size; - iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); - if (info->vector != VIRTIO_MSI_NO_VECTOR) - free_irq(vp_dev->msix_entries[info->vector].vector, vq); + iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, @@ -489,36 +465,62 @@ static void vp_del_vq(struct virtqueue *vq) /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { + struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; + struct virtio_pci_vq_info *info; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + info = vq->priv; + if (vp_dev->per_vq_vectors) + free_irq(vp_dev->msix_entries[info->vector].vector, vq); vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } -/* the config->find_vqs() implementation */ -static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) +static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[], + int nvectors, + bool per_vq_vectors) { - int vectors = 0; - int i, err; - - /* How many vectors would we like? 
*/ - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++vectors; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u16 vector; + int i, err, allocated_vectors; - err = vp_request_vectors(vdev, vectors); + err = vp_request_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { - vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); - if (IS_ERR(vqs[i])) + if (!callbacks[i] || !vp_dev->msix_enabled) + vector = VIRTIO_MSI_NO_VECTOR; + else if (vp_dev->per_vq_vectors) + vector = allocated_vectors++; + else + vector = VP_MSIX_VQ_VECTOR; + vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); goto error_find; + } + /* allocate per-vq irq if available and necessary */ + if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) { + snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, + "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); + err = request_irq(vp_dev->msix_entries[vector].vector, + vring_interrupt, 0, + vp_dev->msix_names[vector], vqs[i]); + if (err) { + vp_del_vq(vqs[i]); + goto error_find; + } + } } return 0; @@ -526,7 +528,37 @@ error_find: vp_del_vqs(vdev); error_request: - return PTR_ERR(vqs[i]); + return err; +} + +/* the config->find_vqs() implementation */ +static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int vectors = 0; + int i, uninitialized_var(err); + + /* How many vectors would we like? */ + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++vectors; + + /* We want at most one vector per queue and one for config changes. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + vectors + 1, true); + if (!err) + return 0; + /* Fallback to separate vectors for config and a shared for queues. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + 2, false); + if (!err) + return 0; + /* Finally fall back to regular interrupts. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + 0, false); + return err; } static struct virtio_config_ops virtio_pci_config_ops = { @@ -669,7 +701,7 @@ static int __init virtio_pci_init(void) err = pci_register_driver(&virtio_pci_driver); if (err) - device_unregister(virtio_pci_root); + root_device_unregister(virtio_pci_root); return err; } diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig index f6542211db4..a9efb162532 100644 --- a/drivers/vlynq/Kconfig +++ b/drivers/vlynq/Kconfig @@ -13,7 +13,7 @@ config VLYNQ config VLYNQ_DEBUG bool "VLYNQ bus debug" - depends on VLYNQ && KERNEL_DEBUG + depends on VLYNQ && DEBUG_KERNEL help Turn on VLYNQ bus debugging. 
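The virtio_pci rework above replaces the old "enable MSI-X, then see what we got" logic with an explicit policy in vp_find_vqs(): first try one vector per virtqueue plus one for configuration changes, then fall back to two vectors (config plus a single vector shared by all queues), and finally to the legacy shared INTx interrupt. Below is a minimal, self-contained sketch of that progressive-fallback pattern; try_setup() and the "available" count are hypothetical stand-ins for illustration, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for vp_try_to_find_vqs(): succeed only if the
 * requested number of MSI-X vectors fits what the "device" can grant. */
static bool try_setup(int nvectors, bool per_vq, int available)
{
	if (nvectors > available)
		return false;
	printf("configured with %d vector(s), per-vq callbacks: %s\n",
	       nvectors, per_vq ? "yes" : "no");
	return true;
}

int main(void)
{
	int nvqs = 3;		/* virtqueues that want callbacks */
	int available = 2;	/* what MSI-X allocation would grant */

	/* 1) one vector per queue plus one for config changes */
	if (try_setup(nvqs + 1, true, available))
		return 0;
	/* 2) two vectors: config plus one shared queue vector */
	if (try_setup(2, false, available))
		return 0;
	/* 3) no MSI-X at all: shared legacy interrupt (zero vectors) */
	if (try_setup(0, false, available))
		return 0;
	return 1;
}

With available set to 2, the first attempt fails and the second succeeds, which corresponds to the shared-queue-vector path the patch takes when per-vq vectors cannot be allocated.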
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index 7335433b067..f05d2a36836 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c @@ -76,7 +76,7 @@ struct vlynq_regs { u32 int_device[8]; }; -#ifdef VLYNQ_DEBUG +#ifdef CONFIG_VLYNQ_DEBUG static void vlynq_dump_regs(struct vlynq_device *dev) { int i; diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index a7e3b706b9d..0d92969404c 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -687,6 +687,7 @@ static int omap_hdq_remove(struct platform_device *pdev) if (hdq_data->hdq_usecount) { dev_dbg(&pdev->dev, "removed when use count is not zero\n"); + mutex_unlock(&hdq_data->hdq_mutex); return -EBUSY; } diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index 3fe9742c23c..2f8643efe92 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -37,7 +37,7 @@ #include <linux/uaccess.h> #include <asm/addrspace.h> -#include <asm/ar7/ar7.h> +#include <asm/mach-ar7/ar7.h> #define DRVNAME "ar7_wdt" #define LONGNAME "TI AR7 Watchdog Timer" diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c index 5c7011cda6a..751c003864a 100644 --- a/drivers/watchdog/bcm47xx_wdt.c +++ b/drivers/watchdog/bcm47xx_wdt.c @@ -161,7 +161,7 @@ static long bcm47xx_wdt_ioctl(struct file *file, { void __user *argp = (void __user *)arg; int __user *p = argp; - int new_value, retval = -EINVAL;; + int new_value, retval = -EINVAL; switch (cmd) { case WDIOC_GETSUPPORT: diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c index fecb307d28e..aec7cefdef2 100644 --- a/drivers/watchdog/coh901327_wdt.c +++ b/drivers/watchdog/coh901327_wdt.c @@ -18,6 +18,7 @@ #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/clk.h> +#include <linux/delay.h> #define DRV_NAME "WDOG COH 901 327" @@ -92,6 +93,8 @@ static struct clk *clk; static void coh901327_enable(u16 timeout) { u16 val; + unsigned long freq; + unsigned long delay_ns; clk_enable(clk); /* Restart timer if it is disabled */ @@ -102,6 +105,14 @@ static void coh901327_enable(u16 timeout) /* Acknowledge any pending interrupt so it doesn't just fire off */ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, virtbase + U300_WDOG_IER); + /* + * The interrupt is cleared in the 32 kHz clock domain. 
+ * Wait 3 32 kHz cycles for it to take effect + */ + freq = clk_get_rate(clk); + delay_ns = (1000000000 + freq - 1) / freq; /* Freq to ns and round up */ + delay_ns = 3 * delay_ns; /* Wait 3 cycles */ + ndelay(delay_ns); /* Enable the watchdog interrupt */ writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); /* Activate the watchdog timer */ diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index e9f950ff86e..cdd55e0d09f 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -29,6 +29,7 @@ #include <linux/watchdog.h> #include <linux/timer.h> #include <linux/uaccess.h> +#include <linux/io.h> #include <mach/hardware.h> #define WDT_VERSION "0.3" diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index 00b03eb43bf..e1c82769b08 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -66,7 +66,7 @@ static inline void ks8695_wdt_stop(void) static inline void ks8695_wdt_start(void) { unsigned long tmcon; - unsigned long tval = wdt_time * CLOCK_TICK_RATE; + unsigned long tval = wdt_time * KS8695_CLOCK_RATE; spin_lock(&ks8695_lock); /* disable timer0 */ @@ -103,7 +103,7 @@ static inline void ks8695_wdt_reload(void) static int ks8695_wdt_settimeout(int new_time) { /* - * All counting occurs at SLOW_CLOCK / 128 = 0.256 Hz + * All counting occurs at KS8695_CLOCK_RATE / 128 = 0.256 Hz * * Since WDV is a 16-bit counter, the maximum period is * 65536 / 0.256 = 256 seconds. diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index ee1caae4d33..016245419fa 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -38,7 +38,7 @@ static unsigned long oscr_freq; static unsigned long sa1100wdt_users; -static int pre_margin; +static unsigned int pre_margin; static int boot_status; /* @@ -84,6 +84,7 @@ static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "SA1100/PXA255 Watchdog", + .firmware_version = 1, }; static long sa1100dog_ioctl(struct file *file, unsigned int cmd, @@ -118,7 +119,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd, if (ret) break; - if (time <= 0 || time > 255) { + if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) { ret = -EINVAL; break; } diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 916890abffd..f201accc4e3 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -89,6 +89,11 @@ static void w83627hf_select_wd_register(void) c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */ outb_p(0x2b, WDT_EFER); outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */ + } else if (c == 0x88) { /* W83627EHF */ + outb_p(0x2d, WDT_EFER); /* select GPIO5 */ + c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */ + outb_p(0x2d, WDT_EFER); + outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */ } outb_p(0x07, WDT_EFER); /* point to logical device number reg */ diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index 883b5f79673..a6c12dec91a 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c @@ -149,8 +149,10 @@ static void wdt_ctrl(int timeout) { spin_lock(&io_lock); - if (w83697ug_select_wd_register() < 0) + if (w83697ug_select_wd_register() < 0) { + spin_unlock(&io_lock); return; + } outb_p(0xF4, WDT_EFER); /* Select CRF4 */ outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */ diff --git a/drivers/watchdog/wdrtas.c 
b/drivers/watchdog/wdrtas.c index a4fe7a38d9b..3bde56bce63 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void) */ static int wdrtas_get_temperature(void) { - long result; + int result; int temperature = 0; - result = rtas_call(wdrtas_token_get_sensor_state, 2, 2, - (void *)__pa(&temperature), - WDRTAS_THERMAL_SENSOR, 0); + result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature); if (result < 0) printk(KERN_WARNING "wdrtas: reading the thermal sensor " - "faild: %li\n", result); + "failed: %i\n", result); else temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */ diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 891d2e90753..abad71b1632 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -927,9 +927,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { void __init xen_init_IRQ(void) { int i; - size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s); - cpu_evtchn_mask_p = alloc_bootmem(size); + cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), + GFP_KERNEL); BUG_ON(cpu_evtchn_mask_p == NULL); init_evtchn_cpu_bindings(); |
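The coh901327_wdt hunk earlier in this series derives a busy-wait from the watchdog's 32 kHz clock domain: one clock period in nanoseconds is computed with a round-up division, multiplied by three cycles, and passed to ndelay() so the interrupt acknowledge has settled before the bark interrupt is re-enabled. A standalone check of that arithmetic is sketched below; the 32768 Hz figure is an assumed example rate, not necessarily what clk_get_rate() reports on the real hardware.

#include <stdio.h>

int main(void)
{
	unsigned long freq = 32768;	/* assumed 32.768 kHz domain clock */
	unsigned long delay_ns;

	/* round-up division: ceil(1e9 / freq) nanoseconds per clock cycle */
	delay_ns = (1000000000UL + freq - 1) / freq;
	delay_ns *= 3;			/* wait three cycles of that clock */

	printf("3 cycles at %lu Hz: %lu ns (about %lu us)\n",
	       freq, delay_ns, delay_ns / 1000);
	return 0;
}

At 32.768 kHz one cycle rounds up to 30518 ns, so the driver ends up waiting just over 91 us before touching U300_WDOG_IMR.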